/*
 * Copyright © 2012-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *    Daniel Vetter <daniel.vetter@ffwll.ch>
 *
 */

#include <linux/pm_runtime.h>
#include <linux/vgaarb.h>

#include "i915_drv.h"
#include "intel_drv.h"

/**
 * DOC: runtime pm
 *
 * The i915 driver supports dynamic enabling and disabling of entire hardware
 * blocks at runtime. This is especially important on the display side where
 * software is supposed to control many power gates manually on recent
 * hardware, since on the GT side a lot of the power management is done by
 * the hardware. But even there some manual control at the device level is
 * required.
 *
 * Since i915 supports a diverse set of platforms with a unified codebase and
 * hardware engineers just love to shuffle functionality around between power
 * domains there's a sizeable amount of indirection required. This file
 * provides generic functions to the driver for grabbing and releasing
 * references for abstract power domains. It then maps those to the actual
 * power wells present for a given platform.
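 *
 * A minimal usage sketch (hypothetical caller, error handling elided):
 * grab a reference before touching hardware behind a domain and drop it
 * symmetrically afterwards:
 *
 *	intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
 *	... access registers behind the PIPE_A power well ...
 *	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);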
 */

#define for_each_power_well(i, power_well, domain_mask, power_domains)	\
	for (i = 0;							\
	     i < (power_domains)->power_well_count &&			\
		 ((power_well) = &(power_domains)->power_wells[i]);	\
	     i++)							\
		for_each_if ((power_well)->domains & (domain_mask))

#define for_each_power_well_rev(i, power_well, domain_mask, power_domains) \
	for (i = (power_domains)->power_well_count - 1;			 \
	     i >= 0 && ((power_well) = &(power_domains)->power_wells[i]);\
	     i--)							 \
		for_each_if ((power_well)->domains & (domain_mask))

bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
					 int power_well_id);

static struct i915_power_well *
lookup_power_well(struct drm_i915_private *dev_priv, int power_well_id);

const char *
intel_display_power_domain_str(enum intel_display_power_domain domain)
{
	switch (domain) {
	case POWER_DOMAIN_PIPE_A:
		return "PIPE_A";
	case POWER_DOMAIN_PIPE_B:
		return "PIPE_B";
	case POWER_DOMAIN_PIPE_C:
		return "PIPE_C";
	case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
		return "PIPE_A_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
		return "PIPE_B_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
		return "PIPE_C_PANEL_FITTER";
	case POWER_DOMAIN_TRANSCODER_A:
		return "TRANSCODER_A";
	case POWER_DOMAIN_TRANSCODER_B:
		return "TRANSCODER_B";
	case POWER_DOMAIN_TRANSCODER_C:
		return "TRANSCODER_C";
	case POWER_DOMAIN_TRANSCODER_EDP:
		return "TRANSCODER_EDP";
	case POWER_DOMAIN_TRANSCODER_DSI_A:
		return "TRANSCODER_DSI_A";
	case POWER_DOMAIN_TRANSCODER_DSI_C:
		return "TRANSCODER_DSI_C";
	case POWER_DOMAIN_PORT_DDI_A_LANES:
		return "PORT_DDI_A_LANES";
	case POWER_DOMAIN_PORT_DDI_B_LANES:
		return "PORT_DDI_B_LANES";
	case POWER_DOMAIN_PORT_DDI_C_LANES:
		return "PORT_DDI_C_LANES";
	case POWER_DOMAIN_PORT_DDI_D_LANES:
		return "PORT_DDI_D_LANES";
	case POWER_DOMAIN_PORT_DDI_E_LANES:
		return "PORT_DDI_E_LANES";
	case POWER_DOMAIN_PORT_DSI:
		return "PORT_DSI";
	case POWER_DOMAIN_PORT_CRT:
		return "PORT_CRT";
	case POWER_DOMAIN_PORT_OTHER:
		return "PORT_OTHER";
	case POWER_DOMAIN_VGA:
		return "VGA";
	case POWER_DOMAIN_AUDIO:
		return "AUDIO";
	case POWER_DOMAIN_PLLS:
		return "PLLS";
	case POWER_DOMAIN_AUX_A:
		return "AUX_A";
	case POWER_DOMAIN_AUX_B:
		return "AUX_B";
	case POWER_DOMAIN_AUX_C:
		return "AUX_C";
	case POWER_DOMAIN_AUX_D:
		return "AUX_D";
	case POWER_DOMAIN_GMBUS:
		return "GMBUS";
	case POWER_DOMAIN_INIT:
		return "INIT";
	case POWER_DOMAIN_MODESET:
		return "MODESET";
	default:
		MISSING_CASE(domain);
		return "?";
	}
}

static void intel_power_well_enable(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well)
{
	DRM_DEBUG_KMS("enabling %s\n", power_well->name);
	power_well->ops->enable(dev_priv, power_well);
	power_well->hw_enabled = true;
}

static void intel_power_well_disable(struct drm_i915_private *dev_priv,
				     struct i915_power_well *power_well)
{
	DRM_DEBUG_KMS("disabling %s\n", power_well->name);
	power_well->hw_enabled = false;
	power_well->ops->disable(dev_priv, power_well);
}

static void intel_power_well_get(struct drm_i915_private *dev_priv,
				 struct i915_power_well *power_well)
{
	if (!power_well->count++)
		intel_power_well_enable(dev_priv, power_well);
}
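/* Drop one reference; physically disable the well once the count hits zero. */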
static void intel_power_well_put(struct drm_i915_private *dev_priv,
				 struct i915_power_well *power_well)
{
	WARN(!power_well->count, "Use count on power well %s is already zero",
	     power_well->name);

	if (!--power_well->count)
		intel_power_well_disable(dev_priv, power_well);
}

/*
 * We should only use the power well if we explicitly asked the hardware to
 * enable it, so check if it's enabled and also check if we've requested it to
 * be enabled.
 */
static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	return I915_READ(HSW_PWR_WELL_DRIVER) ==
		     (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
}

/**
 * __intel_display_power_is_enabled - unlocked check for a power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to check
 *
 * This is the unlocked version of intel_display_power_is_enabled() and should
 * only be used from error capture and recovery code where deadlocks are
 * possible.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
				      enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	bool is_enabled;
	int i;

	if (dev_priv->pm.suspended)
		return false;

	power_domains = &dev_priv->power_domains;

	is_enabled = true;

	for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
		if (power_well->always_on)
			continue;

		if (!power_well->hw_enabled) {
			is_enabled = false;
			break;
		}
	}

	return is_enabled;
}

/**
 * intel_display_power_is_enabled - check for a power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to check
 *
 * This function can be used to check the hw power domain state. It is mostly
 * used in hardware state readout functions. Everywhere else code should rely
 * upon explicit power domain reference counting to ensure that the hardware
 * block is powered up before accessing it.
 *
 * Callers must hold the relevant modesetting locks to ensure that concurrent
 * threads can't disable the power well while the caller tries to read a few
 * registers.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
				    enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	bool ret;

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);
	ret = __intel_display_power_is_enabled(dev_priv, domain);
	mutex_unlock(&power_domains->lock);

	return ret;
}

/**
 * intel_display_set_init_power - set the initial power domain state
 * @dev_priv: i915 device instance
 * @enable: whether to enable or disable the initial power domain state
 *
 * For simplicity our driver load/unload and system suspend/resume code assumes
 * that all power domains are always enabled. This function controls the state
 * of this little hack. While the initial power domain state is enabled,
 * runtime pm is effectively disabled.
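 *
 * A sketch of the intended call pattern (hypothetical, mirroring the
 * driver load path):
 *
 *	intel_display_set_init_power(dev_priv, true);
 *	... one-time initialization with all power domains enabled ...
 *	intel_display_set_init_power(dev_priv, false);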
 */
void intel_display_set_init_power(struct drm_i915_private *dev_priv,
				  bool enable)
{
	if (dev_priv->power_domains.init_power_on == enable)
		return;

	if (enable)
		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
	else
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

	dev_priv->power_domains.init_power_on = enable;
}

/*
 * Starting with Haswell, we have a "Power Down Well" that can be turned off
 * when not needed anymore. We have 4 registers that can request the power well
 * to be enabled, and it will only be disabled if none of the registers is
 * requesting it to be enabled.
 */
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	/*
	 * After we re-enable the power well, if we touch VGA register 0x3d5
	 * we'll get unclaimed register interrupts. This stops after we write
	 * anything to the VGA MSR register. The vgacon module uses this
	 * register all the time, so if we unbind our driver and, as a
	 * consequence, bind vgacon, we'll get stuck in an infinite loop at
	 * console_unlock(). So here we make sure to touch the VGA MSR
	 * register, making sure vgacon can keep working normally without
	 * triggering interrupts and error messages.
	 */
	vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
	outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
	vga_put(pdev, VGA_RSRC_LEGACY_IO);

	if (IS_BROADWELL(dev_priv))
		gen8_irq_power_well_post_enable(dev_priv,
						1 << PIPE_C | 1 << PIPE_B);
}

static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv)
{
	if (IS_BROADWELL(dev_priv))
		gen8_irq_power_well_pre_disable(dev_priv,
						1 << PIPE_C | 1 << PIPE_B);
}

static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
				       struct i915_power_well *power_well)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	/*
	 * After we re-enable the power well, if we touch VGA register 0x3d5
	 * we'll get unclaimed register interrupts. This stops after we write
	 * anything to the VGA MSR register. The vgacon module uses this
	 * register all the time, so if we unbind our driver and, as a
	 * consequence, bind vgacon, we'll get stuck in an infinite loop at
	 * console_unlock(). So here we make sure to touch the VGA MSR
	 * register, making sure vgacon can keep working normally without
	 * triggering interrupts and error messages.
	 */
	if (power_well->id == SKL_DISP_PW_2) {
		vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
		outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
		vga_put(pdev, VGA_RSRC_LEGACY_IO);

		gen8_irq_power_well_post_enable(dev_priv,
						1 << PIPE_C | 1 << PIPE_B);
	}
}

static void skl_power_well_pre_disable(struct drm_i915_private *dev_priv,
				       struct i915_power_well *power_well)
{
	if (power_well->id == SKL_DISP_PW_2)
		gen8_irq_power_well_pre_disable(dev_priv,
						1 << PIPE_C | 1 << PIPE_B);
}

static void hsw_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	bool is_enabled, enable_requested;
	uint32_t tmp;

	tmp = I915_READ(HSW_PWR_WELL_DRIVER);
	is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
	enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;

	if (enable) {
		if (!enable_requested)
			I915_WRITE(HSW_PWR_WELL_DRIVER,
				   HSW_PWR_WELL_ENABLE_REQUEST);

		if (!is_enabled) {
			DRM_DEBUG_KMS("Enabling power well\n");
			if (intel_wait_for_register(dev_priv,
						    HSW_PWR_WELL_DRIVER,
						    HSW_PWR_WELL_STATE_ENABLED,
						    HSW_PWR_WELL_STATE_ENABLED,
						    20))
				DRM_ERROR("Timeout enabling power well\n");
			hsw_power_well_post_enable(dev_priv);
		}

	} else {
		if (enable_requested) {
			hsw_power_well_pre_disable(dev_priv);
			I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
			POSTING_READ(HSW_PWR_WELL_DRIVER);
			DRM_DEBUG_KMS("Requesting to disable the power well\n");
		}
	}
}

#define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT(POWER_DOMAIN_PIPE_B) |			\
	BIT(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT(POWER_DOMAIN_PIPE_C) |			\
	BIT(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_E_LANES) |		\
	BIT(POWER_DOMAIN_AUX_B) |			\
	BIT(POWER_DOMAIN_AUX_C) |			\
	BIT(POWER_DOMAIN_AUX_D) |			\
	BIT(POWER_DOMAIN_AUDIO) |			\
	BIT(POWER_DOMAIN_VGA) |				\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_A_E_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_A_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_E_LANES) |		\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_B_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_C_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_D_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT(POWER_DOMAIN_MODESET) |			\
	BIT(POWER_DOMAIN_AUX_A) |			\
	BIT(POWER_DOMAIN_INIT))

#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT(POWER_DOMAIN_PIPE_B) |			\
	BIT(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT(POWER_DOMAIN_PIPE_C) |			\
	BIT(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT(POWER_DOMAIN_AUX_B) |			\
	BIT(POWER_DOMAIN_AUX_C) |			\
	BIT(POWER_DOMAIN_AUDIO) |			\
	BIT(POWER_DOMAIN_VGA) |				\
	BIT(POWER_DOMAIN_GMBUS) |			\
	BIT(POWER_DOMAIN_INIT))
#define BXT_DISPLAY_DC_OFF_POWER_DOMAINS (		\
	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT(POWER_DOMAIN_MODESET) |			\
	BIT(POWER_DOMAIN_AUX_A) |			\
	BIT(POWER_DOMAIN_INIT))
#define BXT_DPIO_CMN_A_POWER_DOMAINS (			\
	BIT(POWER_DOMAIN_PORT_DDI_A_LANES) |		\
	BIT(POWER_DOMAIN_AUX_A) |			\
	BIT(POWER_DOMAIN_INIT))
#define BXT_DPIO_CMN_BC_POWER_DOMAINS (			\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT(POWER_DOMAIN_AUX_B) |			\
	BIT(POWER_DOMAIN_AUX_C) |			\
	BIT(POWER_DOMAIN_INIT))

static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
{
	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
		  "DC9 already programmed to be enabled.\n");
	WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
		  "DC5 still not disabled to enable DC9.\n");
	WARN_ONCE(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on.\n");
	WARN_ONCE(intel_irqs_enabled(dev_priv),
		  "Interrupts not disabled yet.\n");

	/*
	 * TODO: check for the following to verify the conditions to enter DC9
	 * state are satisfied:
	 * 1] Check relevant display engine registers to verify if mode set
	 * disable sequence was followed.
	 * 2] Check if display uninitialize sequence is initialized.
	 */
}

static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(intel_irqs_enabled(dev_priv),
		  "Interrupts not disabled yet.\n");
	WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
		  "DC5 still not disabled.\n");

	/*
	 * TODO: check for the following to verify DC9 state was indeed
	 * entered before programming to disable it:
	 * 1] Check relevant display engine registers to verify if mode
	 * set disable sequence was followed.
	 * 2] Check if display uninitialize sequence is initialized.
	 */
}

static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
				u32 state)
{
	int rewrites = 0;
	int rereads = 0;
	u32 v;

	I915_WRITE(DC_STATE_EN, state);

	/* It has been observed that disabling the dc6 state sometimes
	 * doesn't stick and the DMC keeps returning the old value. Make
	 * sure the write really sticks by re-reading it enough times, and
	 * force a rewrite until we are confident that the state is exactly
	 * what we want.
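	 *
	 * Concretely, the loop below re-reads until the value has been
	 * stable for several consecutive reads, rewriting (a bounded
	 * number of times) whenever a stale value is observed.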
	 */
	do {
		v = I915_READ(DC_STATE_EN);

		if (v != state) {
			I915_WRITE(DC_STATE_EN, state);
			rewrites++;
			rereads = 0;
		} else if (rereads++ > 5) {
			break;
		}

	} while (rewrites < 100);

	if (v != state)
		DRM_ERROR("Writing dc state to 0x%x failed, now 0x%x\n",
			  state, v);

	/* Most of the times we need one retry, avoid spam */
	if (rewrites > 1)
		DRM_DEBUG_KMS("Rewrote dc state to 0x%x %d times\n",
			      state, rewrites);
}

static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
{
	u32 mask;

	mask = DC_STATE_EN_UPTO_DC5;
	if (IS_BROXTON(dev_priv))
		mask |= DC_STATE_EN_DC9;
	else
		mask |= DC_STATE_EN_UPTO_DC6;

	return mask;
}

void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = I915_READ(DC_STATE_EN) & gen9_dc_mask(dev_priv);

	DRM_DEBUG_KMS("Resetting DC state tracking from %02x to %02x\n",
		      dev_priv->csr.dc_state, val);
	dev_priv->csr.dc_state = val;
}

static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state)
{
	uint32_t val;
	uint32_t mask;

	if (WARN_ON_ONCE(state & ~dev_priv->csr.allowed_dc_mask))
		state &= dev_priv->csr.allowed_dc_mask;

	val = I915_READ(DC_STATE_EN);
	mask = gen9_dc_mask(dev_priv);
	DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n",
		      val & mask, state);

	/* Check if DMC is ignoring our DC state requests */
	if ((val & mask) != dev_priv->csr.dc_state)
		DRM_ERROR("DC state mismatch (0x%x -> 0x%x)\n",
			  dev_priv->csr.dc_state, val & mask);

	val &= ~mask;
	val |= state;

	gen9_write_dc_state(dev_priv, val);

	dev_priv->csr.dc_state = val & mask;
}

void bxt_enable_dc9(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc9(dev_priv);

	DRM_DEBUG_KMS("Enabling DC9\n");

	intel_power_sequencer_reset(dev_priv);
	gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
}

void bxt_disable_dc9(struct drm_i915_private *dev_priv)
{
	assert_can_disable_dc9(dev_priv);

	DRM_DEBUG_KMS("Disabling DC9\n");

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	intel_pps_unlock_regs_wa(dev_priv);
}

static void assert_csr_loaded(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(!I915_READ(CSR_PROGRAM(0)),
		  "CSR program storage start is NULL\n");
	WARN_ONCE(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n");
	WARN_ONCE(!I915_READ(CSR_HTP_SKL), "CSR HTP Not fine\n");
}

static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
{
	bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
							       SKL_DISP_PW_2);

	WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n");

	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5),
		  "DC5 already programmed to be enabled.\n");
	assert_rpm_wakelock_held(dev_priv);

	assert_csr_loaded(dev_priv);
}

void gen9_enable_dc5(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc5(dev_priv);

	DRM_DEBUG_KMS("Enabling DC5\n");

	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
}

static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
		  "Backlight is not disabled.\n");
	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
		  "DC6 already programmed to be enabled.\n");

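	/*
	 * DC5/DC6 transitions are sequenced by the DMC (CSR) firmware, so
	 * its payload must have been loaded before entering DC6.
	 */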
	assert_csr_loaded(dev_priv);
}

void skl_enable_dc6(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc6(dev_priv);

	DRM_DEBUG_KMS("Enabling DC6\n");

	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
}

void skl_disable_dc6(struct drm_i915_private *dev_priv)
{
	DRM_DEBUG_KMS("Disabling DC6\n");

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
}

static void
gen9_sanitize_power_well_requests(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	enum skl_disp_power_wells power_well_id = power_well->id;
	u32 val;
	u32 mask;

	mask = SKL_POWER_WELL_REQ(power_well_id);

	val = I915_READ(HSW_PWR_WELL_KVMR);
	if (WARN_ONCE(val & mask, "Clearing unexpected KVMR request for %s\n",
		      power_well->name))
		I915_WRITE(HSW_PWR_WELL_KVMR, val & ~mask);

	val = I915_READ(HSW_PWR_WELL_BIOS);
	val |= I915_READ(HSW_PWR_WELL_DEBUG);

	if (!(val & mask))
		return;

	/*
	 * DMC is known to force on the request bits for power well 1 on SKL
	 * and BXT and the misc IO power well on SKL but we don't expect any
	 * other request bits to be set, so WARN for those.
	 */
	if (power_well_id == SKL_DISP_PW_1 ||
	    ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
	     power_well_id == SKL_DISP_PW_MISC_IO))
		DRM_DEBUG_DRIVER("Clearing auxiliary requests for %s forced on "
				 "by DMC\n", power_well->name);
	else
		WARN_ONCE(1, "Clearing unexpected auxiliary requests for %s\n",
			  power_well->name);

	I915_WRITE(HSW_PWR_WELL_BIOS, val & ~mask);
	I915_WRITE(HSW_PWR_WELL_DEBUG, val & ~mask);
}
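/*
 * SKL-style power wells share a single request/state register
 * (HSW_PWR_WELL_DRIVER): SKL_POWER_WELL_REQ()/SKL_POWER_WELL_STATE() derive
 * each well's bits from its id, and PG1/PG2 additionally depend on the fuse
 * distribution status checked below.
 */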
static void skl_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	uint32_t tmp, fuse_status;
	uint32_t req_mask, state_mask;
	bool is_enabled, enable_requested, check_fuse_status = false;

	tmp = I915_READ(HSW_PWR_WELL_DRIVER);
	fuse_status = I915_READ(SKL_FUSE_STATUS);

	switch (power_well->id) {
	case SKL_DISP_PW_1:
		if (intel_wait_for_register(dev_priv,
					    SKL_FUSE_STATUS,
					    SKL_FUSE_PG0_DIST_STATUS,
					    SKL_FUSE_PG0_DIST_STATUS,
					    1)) {
			DRM_ERROR("PG0 not enabled\n");
			return;
		}
		break;
	case SKL_DISP_PW_2:
		if (!(fuse_status & SKL_FUSE_PG1_DIST_STATUS)) {
			DRM_ERROR("PG1 in disabled state\n");
			return;
		}
		break;
	case SKL_DISP_PW_DDI_A_E:
	case SKL_DISP_PW_DDI_B:
	case SKL_DISP_PW_DDI_C:
	case SKL_DISP_PW_DDI_D:
	case SKL_DISP_PW_MISC_IO:
		break;
	default:
		WARN(1, "Unknown power well %lu\n", power_well->id);
		return;
	}

	req_mask = SKL_POWER_WELL_REQ(power_well->id);
	enable_requested = tmp & req_mask;
	state_mask = SKL_POWER_WELL_STATE(power_well->id);
	is_enabled = tmp & state_mask;

	if (!enable && enable_requested)
		skl_power_well_pre_disable(dev_priv, power_well);

	if (enable) {
		if (!enable_requested) {
			WARN((tmp & state_mask) &&
				!I915_READ(HSW_PWR_WELL_BIOS),
				"Invalid for power well status to be enabled, unless done by the BIOS, \
				when request is to disable!\n");
			I915_WRITE(HSW_PWR_WELL_DRIVER, tmp | req_mask);
		}

		if (!is_enabled) {
			DRM_DEBUG_KMS("Enabling %s\n", power_well->name);
			check_fuse_status = true;
		}
	} else {
		if (enable_requested) {
			I915_WRITE(HSW_PWR_WELL_DRIVER, tmp & ~req_mask);
			POSTING_READ(HSW_PWR_WELL_DRIVER);
			DRM_DEBUG_KMS("Disabling %s\n", power_well->name);
		}

		if (IS_GEN9(dev_priv))
			gen9_sanitize_power_well_requests(dev_priv, power_well);
	}

	if (wait_for(!!(I915_READ(HSW_PWR_WELL_DRIVER) & state_mask) == enable,
		     1))
		DRM_ERROR("%s %s timeout\n",
			  power_well->name, enable ? "enable" : "disable");

	if (check_fuse_status) {
		if (power_well->id == SKL_DISP_PW_1) {
			if (intel_wait_for_register(dev_priv,
						    SKL_FUSE_STATUS,
						    SKL_FUSE_PG1_DIST_STATUS,
						    SKL_FUSE_PG1_DIST_STATUS,
						    1))
				DRM_ERROR("PG1 distributing status timeout\n");
		} else if (power_well->id == SKL_DISP_PW_2) {
			if (intel_wait_for_register(dev_priv,
						    SKL_FUSE_STATUS,
						    SKL_FUSE_PG2_DIST_STATUS,
						    SKL_FUSE_PG2_DIST_STATUS,
						    1))
				DRM_ERROR("PG2 distributing status timeout\n");
		}
	}

	if (enable && !is_enabled)
		skl_power_well_post_enable(dev_priv, power_well);
}

static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, power_well->count > 0);

	/*
	 * We're taking over the BIOS, so clear any requests made by it since
	 * the driver is in charge now.
	 */
	if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
		I915_WRITE(HSW_PWR_WELL_BIOS, 0);
}

static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, true);
}

static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, false);
}

static bool skl_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	uint32_t mask = SKL_POWER_WELL_REQ(power_well->id) |
			SKL_POWER_WELL_STATE(power_well->id);

	return (I915_READ(HSW_PWR_WELL_DRIVER) & mask) == mask;
}

static void skl_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	skl_set_power_well(dev_priv, power_well, power_well->count > 0);

	/* Clear any request made by BIOS as driver is taking over */
	I915_WRITE(HSW_PWR_WELL_BIOS, 0);
}

static void skl_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	skl_set_power_well(dev_priv, power_well, true);
}

static void skl_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	skl_set_power_well(dev_priv, power_well, false);
}

static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	bxt_ddi_phy_init(dev_priv, power_well->data);
}

static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	bxt_ddi_phy_uninit(dev_priv, power_well->data);
}

static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	return bxt_ddi_phy_is_enabled(dev_priv, power_well->data);
}

static void bxt_dpio_cmn_power_well_sync_hw(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	if (power_well->count > 0)
		bxt_dpio_cmn_power_well_enable(dev_priv, power_well);
	else
		bxt_dpio_cmn_power_well_disable(dev_priv, power_well);
}

static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *power_well;

	power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_A);
	if (power_well->count > 0)
		bxt_ddi_phy_verify_state(dev_priv, power_well->data);

	power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_BC);
	if (power_well->count > 0)
		bxt_ddi_phy_verify_state(dev_priv, power_well->data);
}

static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	return (I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0;
}

static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
{
	u32 tmp = I915_READ(DBUF_CTL);

	WARN((tmp & (DBUF_POWER_STATE | DBUF_POWER_REQUEST)) !=
	     (DBUF_POWER_STATE | DBUF_POWER_REQUEST),
	     "Unexpected DBuf power state (0x%08x)\n", tmp);
}

static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	WARN_ON(dev_priv->cdclk_freq !=
		dev_priv->display.get_display_clock_speed(dev_priv));

	gen9_assert_dbuf_enabled(dev_priv);

	if (IS_BROXTON(dev_priv))
		bxt_verify_ddi_phy_power_wells(dev_priv);
}

static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	if (!dev_priv->csr.dmc_payload)
		return;

	if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC6)
		skl_enable_dc6(dev_priv);
	else if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5)
		gen9_enable_dc5(dev_priv);
}

static void gen9_dc_off_power_well_sync_hw(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	if (power_well->count > 0)
		gen9_dc_off_power_well_enable(dev_priv, power_well);
	else
		gen9_dc_off_power_well_disable(dev_priv, power_well);
}

static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
}

static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
					      struct i915_power_well *power_well)
{
	return true;
}
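/*
 * VLV/CHV power wells are controlled through the Punit: requests are
 * written to PUNIT_REG_PWRGT_CTRL and the resulting state is polled from
 * PUNIT_REG_PWRGT_STATUS, as done by the helper below.
 */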
static void vlv_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	enum punit_power_well power_well_id = power_well->id;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(power_well_id);
	state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
			 PUNIT_PWRGT_PWR_GATE(power_well_id);

	mutex_lock(&dev_priv->rps.hw_lock);

#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)

	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
	ctrl &= ~mask;
	ctrl |= state;
	vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);

	if (wait_for(COND, 100))
		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
			  state,
			  vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));

#undef COND

out:
	mutex_unlock(&dev_priv->rps.hw_lock);
}

static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, power_well->count > 0);
}

static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, true);
}

static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, false);
}

static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	int power_well_id = power_well->id;
	bool enabled = false;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(power_well_id);
	ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);

	mutex_lock(&dev_priv->rps.hw_lock);

	state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) &&
		state != PUNIT_PWRGT_PWR_GATE(power_well_id));
	if (state == ctrl)
		enabled = true;

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
	WARN_ON(ctrl != state);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return enabled;
}

static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
{
	I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);

	/*
	 * Disable trickle feed and enable pnd deadline calculation
	 */
	I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
	I915_WRITE(CBR1_VLV, 0);

	WARN_ON(dev_priv->rawclk_freq == 0);

	I915_WRITE(RAWCLK_FREQ_VLV,
		   DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 1000));
}

static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	enum pipe pipe;

	/*
	 * Enable the CRI clock source so we can get at the
	 * display and the reference clock for VGA
	 * hotplug / manual detection. Supposedly DSI also
	 * needs the ref clock up and running.
	 *
	 * CHV DPLL B/C have some issues if VGA mode is enabled.
	 */
	for_each_pipe(dev_priv, pipe) {
		u32 val = I915_READ(DPLL(pipe));

		val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
		if (pipe != PIPE_A)
			val |= DPLL_INTEGRATED_CRI_CLK_VLV;

		I915_WRITE(DPLL(pipe), val);
	}

	vlv_init_display_clock_gating(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_enable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * During driver initialization/resume we can avoid restoring the
	 * part of the HW/SW state that will be inited anyway explicitly.
	 */
	if (dev_priv->power_domains.initializing)
		return;

	intel_hpd_init(dev_priv);

	/* Re-enable the ADPA, if we have one */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		if (encoder->type == INTEL_OUTPUT_ANALOG)
			intel_crt_reset(&encoder->base);
	}

	i915_redisable_vga_power_on(dev_priv);

	intel_pps_unlock_regs_wa(dev_priv);
}

static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_disable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* make sure we're done processing display irqs */
	synchronize_irq(dev_priv->drm.irq);

	intel_power_sequencer_reset(dev_priv);

	/* Prevent us from re-enabling polling on accident in late suspend */
	if (!dev_priv->drm.dev->power.is_suspended)
		intel_hpd_poll_init(dev_priv);
}

static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DISP2D);

	vlv_set_power_well(dev_priv, power_well, true);

	vlv_display_power_well_init(dev_priv);
}

static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DISP2D);

	vlv_display_power_well_deinit(dev_priv);

	vlv_set_power_well(dev_priv, power_well, false);
}

static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC);

	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */

	vlv_set_power_well(dev_priv, power_well, true);

	/*
	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
	 * 6.	De-assert cmn_reset/side_reset. Same as VLV X0.
	 *	a.	GUnit 0x2110 bit[0] set to 1 (def 0)
	 *	b.	The other bits such as sfr settings / modesel may all
	 *		be set to 0.
	 *
	 * This should only be done on init and resume from S3 with
	 * both PLLs disabled, or we risk losing DPIO and PLL
	 * synchronization.
	 */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
}

static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum pipe pipe;

	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC);

	for_each_pipe(dev_priv, pipe)
		assert_pll_disabled(dev_priv, pipe);

	/* Assert common reset */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);

	vlv_set_power_well(dev_priv, power_well, false);
}

#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)

static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
						 int power_well_id)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	int i;

	for (i = 0; i < power_domains->power_well_count; i++) {
		struct i915_power_well *power_well;

		power_well = &power_domains->power_wells[i];
		if (power_well->id == power_well_id)
			return power_well;
	}

	return NULL;
}

#define BITS_SET(val, bits) (((val) & (bits)) == (bits))

static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D);
	u32 phy_control = dev_priv->chv_phy_control;
	u32 phy_status = 0;
	u32 phy_status_mask = 0xffffffff;

	/*
	 * The BIOS can leave the PHY in some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
	if (!dev_priv->chv_phy_assert[DPIO_PHY0])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
				     PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));

	if (!dev_priv->chv_phy_assert[DPIO_PHY1])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));

	if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY0);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);

		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);

		/* CL1 is on whenever anything is on in either channel */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);

		/*
		 * The DPLLB check accounts for the pipe B + port A usage
		 * with CL2 powered up but all the lanes in the second channel
		 * powered down.
		 */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
		    (I915_READ(DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
	}

	if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY1);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
	}

	phy_status &= phy_status_mask;

	/*
	 * The PHY may be busy with some initial calibration and whatnot,
	 * so the power state can take a while to actually change.
	 */
	if (intel_wait_for_register(dev_priv,
				    DISPLAY_PHY_STATUS,
				    phy_status_mask,
				    phy_status,
				    10))
		DRM_ERROR("Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
			  I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask,
			  phy_status, dev_priv->chv_phy_control);
}

#undef BITS_SET

static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	enum dpio_phy phy;
	enum pipe pipe;
	uint32_t tmp;

	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC &&
		     power_well->id != PUNIT_POWER_WELL_DPIO_CMN_D);

	if (power_well->id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		pipe = PIPE_A;
		phy = DPIO_PHY0;
	} else {
		pipe = PIPE_C;
		phy = DPIO_PHY1;
	}

	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
	vlv_set_power_well(dev_priv, power_well, true);

	/* Poll for phypwrgood signal */
	if (intel_wait_for_register(dev_priv,
				    DISPLAY_PHY_STATUS,
				    PHY_POWERGOOD(phy),
				    PHY_POWERGOOD(phy),
				    1))
		DRM_ERROR("Display PHY %d is not powered up\n", phy);

	mutex_lock(&dev_priv->sb_lock);

	/* Enable dynamic power down */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
	tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
		DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);

	if (power_well->id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
		tmp |= DPIO_DYNPWRDOWNEN_CH1;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
	} else {
		/*
		 * Force the non-existing CL2 off. BXT does this
		 * too, so maybe it saves some power even though
		 * CL2 doesn't exist?
		 */
		tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
		tmp |= DPIO_CL2_LDOFUSE_PWRENB;
		vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
	}

	mutex_unlock(&dev_priv->sb_lock);

	dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		      phy, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);
}

static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum dpio_phy phy;

	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC &&
		     power_well->id != PUNIT_POWER_WELL_DPIO_CMN_D);

	if (power_well->id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		phy = DPIO_PHY0;
		assert_pll_disabled(dev_priv, PIPE_A);
		assert_pll_disabled(dev_priv, PIPE_B);
	} else {
		phy = DPIO_PHY1;
		assert_pll_disabled(dev_priv, PIPE_C);
	}

	dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	vlv_set_power_well(dev_priv, power_well, false);

	DRM_DEBUG_KMS("Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		      phy, dev_priv->chv_phy_control);

	/* PHY is fully reset now, so we can enable the PHY state asserts */
	dev_priv->chv_phy_assert[phy] = true;

	assert_chv_phy_status(dev_priv);
}

static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
				     enum dpio_channel ch, bool override, unsigned int mask)
{
	enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
	u32 reg, val, expected, actual;

	/*
	 * The BIOS can leave the PHY in some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
	if (!dev_priv->chv_phy_assert[phy])
		return;

	if (ch == DPIO_CH0)
		reg = _CHV_CMN_DW0_CH0;
	else
		reg = _CHV_CMN_DW6_CH1;

	mutex_lock(&dev_priv->sb_lock);
	val = vlv_dpio_read(dev_priv, pipe, reg);
	mutex_unlock(&dev_priv->sb_lock);

	/*
	 * This assumes !override is only used when the port is disabled.
	 * All lanes should power down even without the override when
	 * the port is disabled.
	 */
	if (!override || mask == 0xf) {
		expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
		/*
		 * If CH1 common lane is not active anymore
		 * (eg. for pipe B DPLL) the entire channel will
		 * shut down, which causes the common lane registers
		 * to read as 0. That means we can't actually check
		 * the lane power down status bits, but as the entire
		 * register reads as 0 it's a good indication that the
		 * channel is indeed entirely powered down.
		 */
		if (ch == DPIO_CH1 && val == 0)
			expected = 0;
	} else if (mask != 0x0) {
		expected = DPIO_ANYDL_POWERDOWN;
	} else {
		expected = 0;
	}

	if (ch == DPIO_CH0)
		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
	else
		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
	actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;

	WARN(actual != expected,
	     "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
	     !!(actual & DPIO_ALLDL_POWERDOWN), !!(actual & DPIO_ANYDL_POWERDOWN),
	     !!(expected & DPIO_ALLDL_POWERDOWN), !!(expected & DPIO_ANYDL_POWERDOWN),
	     reg, val);
}

bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
			  enum dpio_channel ch, bool override)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	bool was_override;

	mutex_lock(&power_domains->lock);

	was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	if (override == was_override)
		goto out;

	if (override)
		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
	else
		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
		      phy, ch, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);

out:
	mutex_unlock(&power_domains->lock);

	return was_override;
}

void chv_phy_powergate_lanes(struct intel_encoder *encoder,
			     bool override, unsigned int mask)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(&encoder->base));
	enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));

	mutex_lock(&power_domains->lock);

	dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
	dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);

	if (override)
		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
	else
		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
		      phy, ch, mask, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);

	assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);

	mutex_unlock(&power_domains->lock);
}

static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	enum pipe pipe = power_well->id;
	bool enabled;
	u32 state, ctrl;

	mutex_lock(&dev_priv->rps.hw_lock);

	state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe);
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
	enabled = state == DP_SSS_PWR_ON(pipe);

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe);
	WARN_ON(ctrl << 16 != state);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return enabled;
}
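/*
 * Program the per-pipe power state via the Punit DSPFREQ register: write
 * the request to the DP_SSC field and wait for the DP_SSS status field to
 * reflect it.
 */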
static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well,
				    bool enable)
{
	enum pipe pipe = power_well->id;
	u32 state;
	u32 ctrl;

	state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);

	mutex_lock(&dev_priv->rps.hw_lock);

#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state)

	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	ctrl &= ~DP_SSC_MASK(pipe);
	ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl);

	if (wait_for(COND, 100))
		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
			  state,
			  vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ));

#undef COND

out:
	mutex_unlock(&dev_priv->rps.hw_lock);
}

static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->id != PIPE_A);

	chv_set_pipe_power_well(dev_priv, power_well, power_well->count > 0);
}

static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
				       struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->id != PIPE_A);

	chv_set_pipe_power_well(dev_priv, power_well, true);

	vlv_display_power_well_init(dev_priv);
}

static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->id != PIPE_A);

	vlv_display_power_well_deinit(dev_priv);

	chv_set_pipe_power_well(dev_priv, power_well, false);
}

static void
__intel_display_power_get_domain(struct drm_i915_private *dev_priv,
				 enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;
	int i;

	for_each_power_well(i, power_well, BIT(domain), power_domains)
		intel_power_well_get(dev_priv, power_well);

	power_domains->domain_use_count[domain]++;
}

/**
 * intel_display_power_get - grab a power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function grabs a power domain reference for @domain and ensures that the
 * power domain and all its parents are powered up. Therefore users should only
 * grab a reference to the innermost power domain they need.
 *
 * Any power domain reference obtained by this function must have a symmetric
 * call to intel_display_power_put() to release the reference again.
 */
void intel_display_power_get(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	intel_runtime_pm_get(dev_priv);

	mutex_lock(&power_domains->lock);

	__intel_display_power_get_domain(dev_priv, domain);

	mutex_unlock(&power_domains->lock);
}

/**
 * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function grabs a power domain reference for @domain, but only if the
 * domain is currently enabled; in that case it ensures the domain stays
 * powered up. Therefore users should only grab a reference to the innermost
 * power domain they need.
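 *
 * A typical (hypothetical) readout-style call pattern:
 *
 *	if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PIPE_A))
 *		return false;
 *	... read out hardware state ...
 *	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);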
 *
 * Any power domain reference obtained by this function must have a symmetric
 * call to intel_display_power_put() to release the reference again.
 *
 * Returns:
 * True when the reference was grabbed, false otherwise.
 */
bool intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
					enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	bool is_enabled;

	if (!intel_runtime_pm_get_if_in_use(dev_priv))
		return false;

	mutex_lock(&power_domains->lock);

	if (__intel_display_power_is_enabled(dev_priv, domain)) {
		__intel_display_power_get_domain(dev_priv, domain);
		is_enabled = true;
	} else {
		is_enabled = false;
	}

	mutex_unlock(&power_domains->lock);

	if (!is_enabled)
		intel_runtime_pm_put(dev_priv);

	return is_enabled;
}

/**
 * intel_display_power_put - release a power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function drops the power domain reference obtained by
 * intel_display_power_get() and might power down the corresponding hardware
 * block right away if this is the last reference.
 */
void intel_display_power_put(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	int i;

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);

	WARN(!power_domains->domain_use_count[domain],
	     "Use count on domain %s is already zero\n",
	     intel_display_power_domain_str(domain));
	power_domains->domain_use_count[domain]--;

	for_each_power_well_rev(i, power_well, BIT(domain), power_domains)
		intel_power_well_put(dev_priv, power_well);

	mutex_unlock(&power_domains->lock);

	intel_runtime_pm_put(dev_priv);
}

#define HSW_DISPLAY_POWER_DOMAINS (			\
	BIT(POWER_DOMAIN_PIPE_B) |			\
	BIT(POWER_DOMAIN_PIPE_C) |			\
	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
	BIT(POWER_DOMAIN_PORT_CRT) | /* DDI E */	\
	BIT(POWER_DOMAIN_VGA) |				\
	BIT(POWER_DOMAIN_AUDIO) |			\
	BIT(POWER_DOMAIN_INIT))

#define BDW_DISPLAY_POWER_DOMAINS (			\
	BIT(POWER_DOMAIN_PIPE_B) |			\
	BIT(POWER_DOMAIN_PIPE_C) |			\
	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
	BIT(POWER_DOMAIN_PORT_CRT) | /* DDI E */	\
	BIT(POWER_DOMAIN_VGA) |				\
	BIT(POWER_DOMAIN_AUDIO) |			\
	BIT(POWER_DOMAIN_INIT))
#define HSW_DISPLAY_POWER_DOMAINS (			\
	BIT(POWER_DOMAIN_PIPE_B) |			\
	BIT(POWER_DOMAIN_PIPE_C) |			\
	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
	BIT(POWER_DOMAIN_PORT_CRT) | /* DDI E */	\
	BIT(POWER_DOMAIN_VGA) |				\
	BIT(POWER_DOMAIN_AUDIO) |			\
	BIT(POWER_DOMAIN_INIT))

#define BDW_DISPLAY_POWER_DOMAINS (			\
	BIT(POWER_DOMAIN_PIPE_B) |			\
	BIT(POWER_DOMAIN_PIPE_C) |			\
	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
	BIT(POWER_DOMAIN_PORT_CRT) | /* DDI E */	\
	BIT(POWER_DOMAIN_VGA) |				\
	BIT(POWER_DOMAIN_AUDIO) |			\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DISPLAY_POWER_DOMAINS (			\
	BIT(POWER_DOMAIN_PIPE_A) |			\
	BIT(POWER_DOMAIN_PIPE_B) |			\
	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DSI) |			\
	BIT(POWER_DOMAIN_PORT_CRT) |			\
	BIT(POWER_DOMAIN_VGA) |				\
	BIT(POWER_DOMAIN_AUDIO) |			\
	BIT(POWER_DOMAIN_AUX_B) |			\
	BIT(POWER_DOMAIN_AUX_C) |			\
	BIT(POWER_DOMAIN_GMBUS) |			\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_CMN_BC_POWER_DOMAINS (			\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT(POWER_DOMAIN_PORT_CRT) |			\
	BIT(POWER_DOMAIN_AUX_B) |			\
	BIT(POWER_DOMAIN_AUX_C) |			\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT(POWER_DOMAIN_AUX_B) |			\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT(POWER_DOMAIN_AUX_B) |			\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT(POWER_DOMAIN_AUX_C) |			\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT(POWER_DOMAIN_AUX_C) |			\
	BIT(POWER_DOMAIN_INIT))

#define CHV_DISPLAY_POWER_DOMAINS (			\
	BIT(POWER_DOMAIN_PIPE_A) |			\
	BIT(POWER_DOMAIN_PIPE_B) |			\
	BIT(POWER_DOMAIN_PIPE_C) |			\
	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DSI) |			\
	BIT(POWER_DOMAIN_VGA) |				\
	BIT(POWER_DOMAIN_AUDIO) |			\
	BIT(POWER_DOMAIN_AUX_B) |			\
	BIT(POWER_DOMAIN_AUX_C) |			\
	BIT(POWER_DOMAIN_AUX_D) |			\
	BIT(POWER_DOMAIN_GMBUS) |			\
	BIT(POWER_DOMAIN_INIT))

#define CHV_DPIO_CMN_BC_POWER_DOMAINS (			\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT(POWER_DOMAIN_AUX_B) |			\
	BIT(POWER_DOMAIN_AUX_C) |			\
	BIT(POWER_DOMAIN_INIT))

#define CHV_DPIO_CMN_D_POWER_DOMAINS (			\
	BIT(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
	BIT(POWER_DOMAIN_AUX_D) |			\
	BIT(POWER_DOMAIN_INIT))

static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
	.sync_hw = i9xx_always_on_power_well_noop,
	.enable = i9xx_always_on_power_well_noop,
	.disable = i9xx_always_on_power_well_noop,
	.is_enabled = i9xx_always_on_power_well_enabled,
};

static const struct i915_power_well_ops chv_pipe_power_well_ops = {
	.sync_hw = chv_pipe_power_well_sync_hw,
	.enable = chv_pipe_power_well_enable,
	.disable = chv_pipe_power_well_disable,
	.is_enabled = chv_pipe_power_well_enabled,
};

static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = chv_dpio_cmn_power_well_enable,
	.disable = chv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static struct i915_power_well i9xx_always_on_power_well[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
	},
};
static const struct i915_power_well_ops hsw_power_well_ops = {
	.sync_hw = hsw_power_well_sync_hw,
	.enable = hsw_power_well_enable,
	.disable = hsw_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};

static const struct i915_power_well_ops skl_power_well_ops = {
	.sync_hw = skl_power_well_sync_hw,
	.enable = skl_power_well_enable,
	.disable = skl_power_well_disable,
	.is_enabled = skl_power_well_enabled,
};

static const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
	.sync_hw = gen9_dc_off_power_well_sync_hw,
	.enable = gen9_dc_off_power_well_enable,
	.disable = gen9_dc_off_power_well_disable,
	.is_enabled = gen9_dc_off_power_well_enabled,
};

static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = {
	.sync_hw = bxt_dpio_cmn_power_well_sync_hw,
	.enable = bxt_dpio_cmn_power_well_enable,
	.disable = bxt_dpio_cmn_power_well_disable,
	.is_enabled = bxt_dpio_cmn_power_well_enabled,
};

static struct i915_power_well hsw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = HSW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
	},
};

static struct i915_power_well bdw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = BDW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
	},
};

static const struct i915_power_well_ops vlv_display_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_display_power_well_enable,
	.disable = vlv_display_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_dpio_cmn_power_well_enable,
	.disable = vlv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_power_well_enable,
	.disable = vlv_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static struct i915_power_well vlv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = PUNIT_POWER_WELL_ALWAYS_ON,
	},
	{
		.name = "display",
		.domains = VLV_DISPLAY_POWER_DOMAINS,
		.id = PUNIT_POWER_WELL_DISP2D,
		.ops = &vlv_display_power_well_ops,
	},
	{
		.name = "dpio-tx-b-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
	},
	{
		.name = "dpio-tx-b-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
	},
	{
		.name = "dpio-tx-c-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
	},
	{
		.name = "dpio-tx-c-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
	},
	{
		.name = "dpio-common",
		.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
		.id = PUNIT_POWER_WELL_DPIO_CMN_BC,
		.ops = &vlv_dpio_cmn_power_well_ops,
	},
};

static struct i915_power_well chv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		/*
		 * Pipe A power well is the new disp2d well. Pipe B and C
		 * power wells don't actually exist. Pipe A power well is
		 * required for any pipe to work.
		 */
		.domains = CHV_DISPLAY_POWER_DOMAINS,
		.id = PIPE_A,
		.ops = &chv_pipe_power_well_ops,
	},
	{
		.name = "dpio-common-bc",
		.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
		.id = PUNIT_POWER_WELL_DPIO_CMN_BC,
		.ops = &chv_dpio_cmn_power_well_ops,
	},
	{
		.name = "dpio-common-d",
		.domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
		.id = PUNIT_POWER_WELL_DPIO_CMN_D,
		.ops = &chv_dpio_cmn_power_well_ops,
	},
};

bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
					 int power_well_id)
{
	struct i915_power_well *power_well;
	bool ret;

	power_well = lookup_power_well(dev_priv, power_well_id);
	ret = power_well->ops->is_enabled(dev_priv, power_well);

	return ret;
}

static struct i915_power_well skl_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = SKL_DISP_PW_ALWAYS_ON,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.domains = 0,
		.ops = &skl_power_well_ops,
		.id = SKL_DISP_PW_1,
	},
	{
		.name = "MISC IO power well",
		/* Handled by the DMC firmware */
		.domains = 0,
		.ops = &skl_power_well_ops,
		.id = SKL_DISP_PW_MISC_IO,
	},
	{
		.name = "DC off",
		.domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = SKL_DISP_PW_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.id = SKL_DISP_PW_2,
	},
	{
		.name = "DDI A/E power well",
		.domains = SKL_DISPLAY_DDI_A_E_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.id = SKL_DISP_PW_DDI_A_E,
	},
	{
		.name = "DDI B power well",
		.domains = SKL_DISPLAY_DDI_B_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.id = SKL_DISP_PW_DDI_B,
	},
	{
		.name = "DDI C power well",
		.domains = SKL_DISPLAY_DDI_C_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.id = SKL_DISP_PW_DDI_C,
	},
	{
		.name = "DDI D power well",
		.domains = SKL_DISPLAY_DDI_D_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.id = SKL_DISP_PW_DDI_D,
	},
};
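/*
 * Usage sketch (illustrative only): wells carry a platform-specific ->id, so
 * code can query one well directly instead of going through a power domain,
 * e.g. to check whether the DMC-managed power well 1 above is currently on:
 *
 *	if (intel_display_power_well_is_enabled(dev_priv, SKL_DISP_PW_1))
 *		... PG1 is powered ...
 */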
static struct i915_power_well bxt_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "power well 1",
		.domains = 0,
		.ops = &skl_power_well_ops,
		.id = SKL_DISP_PW_1,
	},
	{
		.name = "DC off",
		.domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = SKL_DISP_PW_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.id = SKL_DISP_PW_2,
	},
	{
		.name = "dpio-common-a",
		.domains = BXT_DPIO_CMN_A_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = BXT_DPIO_CMN_A,
		.data = DPIO_PHY1,
	},
	{
		.name = "dpio-common-bc",
		.domains = BXT_DPIO_CMN_BC_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = BXT_DPIO_CMN_BC,
		.data = DPIO_PHY0,
	},
};

static int
sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
				   int disable_power_well)
{
	if (disable_power_well >= 0)
		return !!disable_power_well;

	return 1;
}

static uint32_t get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
				    int enable_dc)
{
	uint32_t mask;
	int requested_dc;
	int max_dc;

	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
		max_dc = 2;
		mask = 0;
	} else if (IS_BROXTON(dev_priv)) {
		max_dc = 1;
		/*
		 * DC9 has a separate HW flow from the rest of the DC states,
		 * not depending on the DMC firmware. It's needed by system
		 * suspend/resume, so allow it unconditionally.
		 */
		mask = DC_STATE_EN_DC9;
	} else {
		max_dc = 0;
		mask = 0;
	}

	if (!i915.disable_power_well)
		max_dc = 0;

	if (enable_dc >= 0 && enable_dc <= max_dc) {
		requested_dc = enable_dc;
	} else if (enable_dc == -1) {
		requested_dc = max_dc;
	} else if (enable_dc > max_dc && enable_dc <= 2) {
		DRM_DEBUG_KMS("Adjusting requested max DC state (%d->%d)\n",
			      enable_dc, max_dc);
		requested_dc = max_dc;
	} else {
		DRM_ERROR("Unexpected value for enable_dc (%d)\n", enable_dc);
		requested_dc = max_dc;
	}

	if (requested_dc > 1)
		mask |= DC_STATE_EN_UPTO_DC6;
	if (requested_dc > 0)
		mask |= DC_STATE_EN_UPTO_DC5;

	DRM_DEBUG_KMS("Allowed DC state mask %02x\n", mask);

	return mask;
}
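/*
 * Worked example (illustrative only): on SKL/KBL with the defaults, i.e.
 * i915.disable_power_well sanitized to 1 and i915.enable_dc == -1, max_dc
 * is 2, requested_dc becomes 2 and the returned mask is
 * DC_STATE_EN_UPTO_DC5 | DC_STATE_EN_UPTO_DC6. On BXT booted with
 * i915.enable_dc=0 no DC5/DC6 bits are set, but the mask still contains
 * DC_STATE_EN_DC9, which is allowed unconditionally because system
 * suspend/resume depends on it.
 */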
#define set_power_wells(power_domains, __power_wells) ({		\
	(power_domains)->power_wells = (__power_wells);			\
	(power_domains)->power_well_count = ARRAY_SIZE(__power_wells);	\
})

/**
 * intel_power_domains_init - initializes the power domain structures
 * @dev_priv: i915 device instance
 *
 * Initializes the power domain structures for @dev_priv depending upon the
 * supported platform.
 */
int intel_power_domains_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	i915.disable_power_well = sanitize_disable_power_well_option(dev_priv,
						     i915.disable_power_well);
	dev_priv->csr.allowed_dc_mask = get_allowed_dc_mask(dev_priv,
							    i915.enable_dc);

	BUILD_BUG_ON(POWER_DOMAIN_NUM > 31);

	mutex_init(&power_domains->lock);

	/*
	 * The enabling order will be from lower to higher indexed wells,
	 * the disabling order is reversed.
	 */
	if (IS_HASWELL(dev_priv)) {
		set_power_wells(power_domains, hsw_power_wells);
	} else if (IS_BROADWELL(dev_priv)) {
		set_power_wells(power_domains, bdw_power_wells);
	} else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
		set_power_wells(power_domains, skl_power_wells);
	} else if (IS_BROXTON(dev_priv)) {
		set_power_wells(power_domains, bxt_power_wells);
	} else if (IS_CHERRYVIEW(dev_priv)) {
		set_power_wells(power_domains, chv_power_wells);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		set_power_wells(power_domains, vlv_power_wells);
	} else {
		set_power_wells(power_domains, i9xx_always_on_power_well);
	}

	return 0;
}
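/*
 * Pairing sketch (illustrative only; the real call sites live in the driver
 * load/unload and resume paths): intel_power_domains_init() only sets up
 * software state and is matched by intel_power_domains_fini() on unload,
 * with intel_power_domains_init_hw() bringing the hardware into a known
 * state in between:
 *
 *	intel_power_domains_init(dev_priv);
 *	intel_power_domains_init_hw(dev_priv, false);
 *	...
 *	intel_power_domains_fini(dev_priv);
 */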
/**
 * intel_power_domains_fini - finalizes the power domain structures
 * @dev_priv: i915 device instance
 *
 * Finalizes the power domain structures for @dev_priv depending upon the
 * supported platform. This function also disables runtime pm and ensures that
 * the device stays powered up so that the driver can be reloaded.
 */
void intel_power_domains_fini(struct drm_i915_private *dev_priv)
{
	struct device *kdev = &dev_priv->drm.pdev->dev;

	/*
	 * The i915.ko module is still not prepared to be loaded when
	 * the power well is not enabled, so just enable it in case
	 * we're going to unload/reload.
	 * The following also reacquires the RPM reference the core passed
	 * to the driver during loading, which is dropped in
	 * intel_runtime_pm_enable(). We have to hand back the control of the
	 * device to the core with this reference held.
	 */
	intel_display_set_init_power(dev_priv, true);

	/* Remove the refcount we took to keep power well support disabled. */
	if (!i915.disable_power_well)
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

	/*
	 * Remove the refcount we took in intel_runtime_pm_enable() in case
	 * the platform doesn't support runtime PM.
	 */
	if (!HAS_RUNTIME_PM(dev_priv))
		pm_runtime_put(kdev);
}

static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;
	int i;

	mutex_lock(&power_domains->lock);
	for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
		power_well->ops->sync_hw(dev_priv, power_well);
		power_well->hw_enabled = power_well->ops->is_enabled(dev_priv,
								     power_well);
	}
	mutex_unlock(&power_domains->lock);
}

static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
{
	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
	POSTING_READ(DBUF_CTL);

	udelay(10);

	if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
		DRM_ERROR("DBuf power enable timeout\n");
}

static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
{
	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
	POSTING_READ(DBUF_CTL);

	udelay(10);

	if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
		DRM_ERROR("DBuf power disable timeout!\n");
}

static void skl_display_core_init(struct drm_i915_private *dev_priv,
				  bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;
	uint32_t val;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* enable PCH reset handshake */
	val = I915_READ(HSW_NDE_RSTWRN_OPT);
	I915_WRITE(HSW_NDE_RSTWRN_OPT, val | RESET_PCH_HANDSHAKE_ENABLE);

	/* enable PG1 and Misc I/O */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
	intel_power_well_enable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	skl_init_cdclk(dev_priv);

	gen9_dbuf_enable(dev_priv);

	if (resume && dev_priv->csr.dmc_payload)
		intel_csr_load_program(dev_priv);
}

static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	gen9_dbuf_disable(dev_priv);

	skl_uninit_cdclk(dev_priv);

	/* The spec doesn't call for removing the reset handshake flag */
	/* disable PG1 and Misc I/O */

	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
	intel_power_well_disable(dev_priv, well);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);

	mutex_unlock(&power_domains->lock);
}
void bxt_display_core_init(struct drm_i915_private *dev_priv,
			   bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;
	uint32_t val;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/*
	 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
	 * or else the reset will hang because there is no PCH to respond.
	 * Move the handshake programming to the initialization sequence;
	 * previously it was left up to the BIOS.
	 */
	val = I915_READ(HSW_NDE_RSTWRN_OPT);
	val &= ~RESET_PCH_HANDSHAKE_ENABLE;
	I915_WRITE(HSW_NDE_RSTWRN_OPT, val);

	/* Enable PG1 */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	bxt_init_cdclk(dev_priv);

	gen9_dbuf_enable(dev_priv);

	if (resume && dev_priv->csr.dmc_payload)
		intel_csr_load_program(dev_priv);
}

void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	gen9_dbuf_disable(dev_priv);

	bxt_uninit_cdclk(dev_priv);

	/* The spec doesn't call for removing the reset handshake flag */

	/* Disable PG1 */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);

	mutex_unlock(&power_domains->lock);
}
static void chv_phy_control_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D);

	/*
	 * DISPLAY_PHY_CONTROL can get corrupted if read. As a
	 * workaround never ever read DISPLAY_PHY_CONTROL, and
	 * instead maintain a shadow copy ourselves. Use the actual
	 * power well state and lane status to reconstruct the
	 * expected initial value.
	 */
	dev_priv->chv_phy_control =
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);

	/*
	 * If all lanes are disabled we leave the override disabled
	 * with all power down bits cleared to match the state we
	 * would use after disabling the port. Otherwise enable the
	 * override and set the lane powerdown bits according to the
	 * current lane status.
	 */
	if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) {
		uint32_t status = I915_READ(DPLL(PIPE_A));
		unsigned int mask;

		mask = status & DPLL_PORTB_READY_MASK;
		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);

		mask = (status & DPLL_PORTC_READY_MASK) >> 4;
		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);

		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);

		dev_priv->chv_phy_assert[DPIO_PHY0] = false;
	} else {
		dev_priv->chv_phy_assert[DPIO_PHY0] = true;
	}

	if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) {
		uint32_t status = I915_READ(DPIO_PHY_STATUS);
		unsigned int mask;

		mask = status & DPLL_PORTD_READY_MASK;

		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);

		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);

		dev_priv->chv_phy_assert[DPIO_PHY1] = false;
	} else {
		dev_priv->chv_phy_assert[DPIO_PHY1] = true;
	}

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Initial PHY_CONTROL=0x%08x\n",
		      dev_priv->chv_phy_control);
}
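/*
 * Worked example for the reconstruction above (illustrative values): if the
 * DPLL_PORTB_READY_MASK bits of DPLL(PIPE_A) read back as 0x3, i.e. only two
 * lanes ready, mask != 0xf, so the override is enabled with
 * PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0) and the powerdown override
 * bits are set to 0x3. A fully ready channel (0xf) is instead mapped to a
 * mask of 0x0 and the override is left disabled.
 */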
static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
	struct i915_power_well *disp2d =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D);

	/* If the display might be already active, skip this */
	if (cmn->ops->is_enabled(dev_priv, cmn) &&
	    disp2d->ops->is_enabled(dev_priv, disp2d) &&
	    I915_READ(DPIO_CTL) & DPIO_CMNRST)
		return;

	DRM_DEBUG_KMS("toggling display PHY side reset\n");

	/* cmnlane needs DPLL registers */
	disp2d->ops->enable(dev_priv, disp2d);

	/*
	 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
	 * Need to assert and de-assert PHY SB reset by gating the
	 * common lane power, then un-gating it.
	 * Simply ungating isn't enough to reset the PHY enough to get
	 * ports and lanes running.
	 */
	cmn->ops->disable(dev_priv, cmn);
}

/**
 * intel_power_domains_init_hw - initialize hardware power domain state
 * @dev_priv: i915 device instance
 * @resume: Called from resume code paths or not
 *
 * This function initializes the hardware power domain state and enables all
 * power domains using intel_display_set_init_power().
 */
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	power_domains->initializing = true;

	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
		skl_display_core_init(dev_priv, resume);
	} else if (IS_BROXTON(dev_priv)) {
		bxt_display_core_init(dev_priv, resume);
	} else if (IS_CHERRYVIEW(dev_priv)) {
		mutex_lock(&power_domains->lock);
		chv_phy_control_init(dev_priv);
		mutex_unlock(&power_domains->lock);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		mutex_lock(&power_domains->lock);
		vlv_cmnlane_wa(dev_priv);
		mutex_unlock(&power_domains->lock);
	}

	/* For now, we need the power well to be always enabled. */
	intel_display_set_init_power(dev_priv, true);
	/* Disable power well support if the user asked so. */
	if (!i915.disable_power_well)
		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
	intel_power_domains_sync_hw(dev_priv);
	power_domains->initializing = false;
}

/**
 * intel_power_domains_suspend - suspend power domain state
 * @dev_priv: i915 device instance
 *
 * This function prepares the hardware power domain state before entering
 * system suspend. It must be paired with intel_power_domains_init_hw().
 */
void intel_power_domains_suspend(struct drm_i915_private *dev_priv)
{
	/*
	 * Even if power well support was disabled we still want to disable
	 * power wells while we are system suspended.
	 */
	if (!i915.disable_power_well)
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
		skl_display_core_uninit(dev_priv);
	else if (IS_BROXTON(dev_priv))
		bxt_display_core_uninit(dev_priv);
}

/**
 * intel_runtime_pm_get - grab a runtime pm reference
 * @dev_priv: i915 device instance
 *
 * This function grabs a device-level runtime pm reference (mostly used for GEM
 * code to ensure the GTT or GT is on) and ensures that it is powered up.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 */
void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	struct device *kdev = &pdev->dev;

	pm_runtime_get_sync(kdev);

	atomic_inc(&dev_priv->pm.wakeref_count);
	assert_rpm_wakelock_held(dev_priv);
}
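/*
 * Usage sketch (illustrative only): GEM code that needs the device awake,
 * for example to access the GTT, brackets the access with a matching
 * get/put pair:
 *
 *	intel_runtime_pm_get(dev_priv);
 *	... the device is guaranteed to be powered up here ...
 *	intel_runtime_pm_put(dev_priv);
 */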
/**
 * intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device in use
 * @dev_priv: i915 device instance
 *
 * This function grabs a device-level runtime pm reference if the device is
 * already in use and ensures that it is powered up.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 */
bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	struct device *kdev = &pdev->dev;

	if (IS_ENABLED(CONFIG_PM)) {
		int ret = pm_runtime_get_if_in_use(kdev);

		/*
		 * In case runtime PM is disabled by the RPM core and we get
		 * an -EINVAL return value, we are not supposed to call this
		 * function, since the power state is undefined. At the moment
		 * this applies to the late/early system suspend/resume
		 * handlers.
		 */
		WARN_ON_ONCE(ret < 0);
		if (ret <= 0)
			return false;
	}

	atomic_inc(&dev_priv->pm.wakeref_count);
	assert_rpm_wakelock_held(dev_priv);

	return true;
}

/**
 * intel_runtime_pm_get_noresume - grab a runtime pm reference
 * @dev_priv: i915 device instance
 *
 * This function grabs a device-level runtime pm reference (mostly used for GEM
 * code to ensure the GTT or GT is on).
 *
 * It will _not_ power up the device but instead only check that it's powered
 * on. Therefore it is only valid to call this function from contexts where
 * the device is known to be powered up and where trying to power it up would
 * result in hilarity and deadlocks. That pretty much means only the system
 * suspend/resume code where this is used to grab runtime pm references for
 * delayed setup down in work items.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 */
void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	struct device *kdev = &pdev->dev;

	assert_rpm_wakelock_held(dev_priv);
	pm_runtime_get_noresume(kdev);

	atomic_inc(&dev_priv->pm.wakeref_count);
}

/**
 * intel_runtime_pm_put - release a runtime pm reference
 * @dev_priv: i915 device instance
 *
 * This function drops the device-level runtime pm reference obtained by
 * intel_runtime_pm_get() and might power down the corresponding
 * hardware block right away if this is the last reference.
 */
void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	struct device *kdev = &pdev->dev;

	assert_rpm_wakelock_held(dev_priv);
	atomic_dec(&dev_priv->pm.wakeref_count);

	pm_runtime_mark_last_busy(kdev);
	pm_runtime_put_autosuspend(kdev);
}

/**
 * intel_runtime_pm_enable - enable runtime pm
 * @dev_priv: i915 device instance
 *
 * This function enables runtime pm at the end of the driver load sequence.
 *
 * Note that this function does currently not enable runtime pm for the
 * subordinate display power domains. That is only done on the first modeset
 * using intel_display_set_init_power().
 */
void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	struct device *kdev = &pdev->dev;

	pm_runtime_set_autosuspend_delay(kdev, 10000); /* 10s */
	pm_runtime_mark_last_busy(kdev);

	/*
	 * Take a permanent reference to disable the RPM functionality and drop
	 * it only when unloading the driver. Use the low level get/put helpers,
	 * so the driver's own RPM reference tracking asserts also work on
	 * platforms without RPM support.
	 */
	if (!HAS_RUNTIME_PM(dev_priv)) {
		pm_runtime_dont_use_autosuspend(kdev);
		pm_runtime_get_sync(kdev);
	} else {
		pm_runtime_use_autosuspend(kdev);
	}

	/*
	 * The core calls the driver load handler with an RPM reference held.
	 * We drop that here and will reacquire it during unloading in
	 * intel_power_domains_fini().
	 */
	pm_runtime_put_autosuspend(kdev);
}