/*
 * Copyright © 2012-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *    Daniel Vetter <daniel.vetter@ffwll.ch>
 *
 */

#include <linux/pm_runtime.h>
#include <linux/vgaarb.h>

#include "i915_drv.h"
#include "intel_drv.h"

/**
 * DOC: runtime pm
 *
 * The i915 driver supports dynamic enabling and disabling of entire hardware
 * blocks at runtime. This is especially important on the display side where
 * software is supposed to control many power gates manually on recent hardware,
 * since on the GT side a lot of the power management is done by the hardware.
 * But even there some manual control at the device level is required.
 *
 * Since i915 supports a diverse set of platforms with a unified codebase and
 * hardware engineers just love to shuffle functionality around between power
 * domains there's a sizeable amount of indirection required. This file provides
 * generic functions to the driver for grabbing and releasing references for
 * abstract power domains. It then maps those to the actual power wells
 * present for a given platform.
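 *
 * As a usage sketch (an illustrative example, not code from this file), a
 * caller that needs pipe A powered up would bracket its hardware access
 * like this:
 *
 *	intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
 *	... program pipe A registers ...
 *	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);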
 */

#define for_each_power_well(i, power_well, domain_mask, power_domains)	\
	for (i = 0;							\
	     i < (power_domains)->power_well_count &&			\
		((power_well) = &(power_domains)->power_wells[i]);	\
	     i++)							\
		for_each_if ((power_well)->domains & (domain_mask))

#define for_each_power_well_rev(i, power_well, domain_mask, power_domains) \
	for (i = (power_domains)->power_well_count - 1;			   \
	     i >= 0 && ((power_well) = &(power_domains)->power_wells[i]); \
	     i--)							   \
		for_each_if ((power_well)->domains & (domain_mask))

bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
					 int power_well_id);

static struct i915_power_well *
lookup_power_well(struct drm_i915_private *dev_priv, int power_well_id);

const char *
intel_display_power_domain_str(enum intel_display_power_domain domain)
{
	switch (domain) {
	case POWER_DOMAIN_PIPE_A:
		return "PIPE_A";
	case POWER_DOMAIN_PIPE_B:
		return "PIPE_B";
	case POWER_DOMAIN_PIPE_C:
		return "PIPE_C";
	case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
		return "PIPE_A_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
		return "PIPE_B_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
		return "PIPE_C_PANEL_FITTER";
	case POWER_DOMAIN_TRANSCODER_A:
		return "TRANSCODER_A";
	case POWER_DOMAIN_TRANSCODER_B:
		return "TRANSCODER_B";
	case POWER_DOMAIN_TRANSCODER_C:
		return "TRANSCODER_C";
	case POWER_DOMAIN_TRANSCODER_EDP:
		return "TRANSCODER_EDP";
	case POWER_DOMAIN_TRANSCODER_DSI_A:
		return "TRANSCODER_DSI_A";
	case POWER_DOMAIN_TRANSCODER_DSI_C:
		return "TRANSCODER_DSI_C";
	case POWER_DOMAIN_PORT_DDI_A_LANES:
		return "PORT_DDI_A_LANES";
	case POWER_DOMAIN_PORT_DDI_B_LANES:
		return "PORT_DDI_B_LANES";
	case POWER_DOMAIN_PORT_DDI_C_LANES:
		return "PORT_DDI_C_LANES";
	case POWER_DOMAIN_PORT_DDI_D_LANES:
		return "PORT_DDI_D_LANES";
	case POWER_DOMAIN_PORT_DDI_E_LANES:
		return "PORT_DDI_E_LANES";
	case POWER_DOMAIN_PORT_DSI:
		return "PORT_DSI";
	case POWER_DOMAIN_PORT_CRT:
		return "PORT_CRT";
	case POWER_DOMAIN_PORT_OTHER:
		return "PORT_OTHER";
	case POWER_DOMAIN_VGA:
		return "VGA";
	case POWER_DOMAIN_AUDIO:
		return "AUDIO";
	case POWER_DOMAIN_PLLS:
		return "PLLS";
	case POWER_DOMAIN_AUX_A:
		return "AUX_A";
	case POWER_DOMAIN_AUX_B:
		return "AUX_B";
	case POWER_DOMAIN_AUX_C:
		return "AUX_C";
	case POWER_DOMAIN_AUX_D:
		return "AUX_D";
	case POWER_DOMAIN_GMBUS:
		return "GMBUS";
	case POWER_DOMAIN_INIT:
		return "INIT";
	case POWER_DOMAIN_MODESET:
		return "MODESET";
	default:
		MISSING_CASE(domain);
		return "?";
	}
}

static void intel_power_well_enable(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well)
{
	DRM_DEBUG_KMS("enabling %s\n", power_well->name);
	power_well->ops->enable(dev_priv, power_well);
	power_well->hw_enabled = true;
}

static void intel_power_well_disable(struct drm_i915_private *dev_priv,
				     struct i915_power_well *power_well)
{
	DRM_DEBUG_KMS("disabling %s\n", power_well->name);
	power_well->hw_enabled = false;
	power_well->ops->disable(dev_priv, power_well);
}

static void intel_power_well_get(struct drm_i915_private *dev_priv,
				 struct i915_power_well *power_well)
{
	if (!power_well->count++)
		intel_power_well_enable(dev_priv, power_well);
}

static void intel_power_well_put(struct drm_i915_private *dev_priv,
				 struct i915_power_well *power_well)
{
	WARN(!power_well->count, "Use count on power well %s is already zero",
	     power_well->name);

	if (!--power_well->count)
		intel_power_well_disable(dev_priv, power_well);
}

/*
 * We should only use the power well if we explicitly asked the hardware to
 * enable it, so check if it's enabled and also check if we've requested it to
 * be enabled.
 */
static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	return I915_READ(HSW_PWR_WELL_DRIVER) ==
		     (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
}

/**
 * __intel_display_power_is_enabled - unlocked check for a power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to check
 *
 * This is the unlocked version of intel_display_power_is_enabled() and should
 * only be used from error capture and recovery code where deadlocks are
 * possible.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
				      enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	bool is_enabled;
	int i;

	if (dev_priv->pm.suspended)
		return false;

	power_domains = &dev_priv->power_domains;

	is_enabled = true;

	for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
		if (power_well->always_on)
			continue;

		if (!power_well->hw_enabled) {
			is_enabled = false;
			break;
		}
	}

	return is_enabled;
}

/**
 * intel_display_power_is_enabled - check for a power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to check
 *
 * This function can be used to check the hw power domain state. It is mostly
 * used in hardware state readout functions. Everywhere else code should rely
 * upon explicit power domain reference counting to ensure that the hardware
 * block is powered up before accessing it.
 *
 * Callers must hold the relevant modesetting locks to ensure that concurrent
 * threads can't disable the power well while the caller tries to read a few
 * registers.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
				    enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	bool ret;

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);
	ret = __intel_display_power_is_enabled(dev_priv, domain);
	mutex_unlock(&power_domains->lock);

	return ret;
}

/**
 * intel_display_set_init_power - set the initial power domain state
 * @dev_priv: i915 device instance
 * @enable: whether to enable or disable the initial power domain state
 *
 * For simplicity our driver load/unload and system suspend/resume code assumes
 * that all power domains are always enabled. This function controls the state
 * of this little hack. While the initial power domain state is enabled runtime
 * pm is effectively disabled.
 */
void intel_display_set_init_power(struct drm_i915_private *dev_priv,
				  bool enable)
{
	if (dev_priv->power_domains.init_power_on == enable)
		return;

	if (enable)
		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
	else
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

	dev_priv->power_domains.init_power_on = enable;
}

/*
 * Starting with Haswell, we have a "Power Down Well" that can be turned off
 * when not needed anymore. We have 4 registers that can request the power well
 * to be enabled (BIOS, DRIVER, KVMR and DEBUG), and it will only be disabled
 * if none of the registers is requesting it to be enabled.
 */
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	struct drm_device *dev = &dev_priv->drm;

	/*
	 * After we re-enable the power well, if we touch VGA register 0x3d5
	 * we'll get unclaimed register interrupts. This stops after we write
	 * anything to the VGA MSR register. The vgacon module uses this
	 * register all the time, so if we unbind our driver and, as a
	 * consequence, bind vgacon, we'll get stuck in an infinite loop at
	 * console_unlock(). So here we make sure we touch the VGA MSR
	 * register, so that vgacon can keep working normally without
	 * triggering interrupts and error messages.
	 */
	vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
	outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
	vga_put(pdev, VGA_RSRC_LEGACY_IO);

	if (IS_BROADWELL(dev))
		gen8_irq_power_well_post_enable(dev_priv,
						1 << PIPE_C | 1 << PIPE_B);
}

static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv)
{
	if (IS_BROADWELL(dev_priv))
		gen8_irq_power_well_pre_disable(dev_priv,
						1 << PIPE_C | 1 << PIPE_B);
}

static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
				       struct i915_power_well *power_well)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	/*
	 * After we re-enable the power well, if we touch VGA register 0x3d5
	 * we'll get unclaimed register interrupts. This stops after we write
	 * anything to the VGA MSR register. The vgacon module uses this
	 * register all the time, so if we unbind our driver and, as a
	 * consequence, bind vgacon, we'll get stuck in an infinite loop at
	 * console_unlock(). So here we make sure we touch the VGA MSR
	 * register, so that vgacon can keep working normally without
	 * triggering interrupts and error messages.
333 */ 334 if (power_well->data == SKL_DISP_PW_2) { 335 vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO); 336 outb(inb(VGA_MSR_READ), VGA_MSR_WRITE); 337 vga_put(pdev, VGA_RSRC_LEGACY_IO); 338 339 gen8_irq_power_well_post_enable(dev_priv, 340 1 << PIPE_C | 1 << PIPE_B); 341 } 342 } 343 344 static void skl_power_well_pre_disable(struct drm_i915_private *dev_priv, 345 struct i915_power_well *power_well) 346 { 347 if (power_well->data == SKL_DISP_PW_2) 348 gen8_irq_power_well_pre_disable(dev_priv, 349 1 << PIPE_C | 1 << PIPE_B); 350 } 351 352 static void hsw_set_power_well(struct drm_i915_private *dev_priv, 353 struct i915_power_well *power_well, bool enable) 354 { 355 bool is_enabled, enable_requested; 356 uint32_t tmp; 357 358 tmp = I915_READ(HSW_PWR_WELL_DRIVER); 359 is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED; 360 enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST; 361 362 if (enable) { 363 if (!enable_requested) 364 I915_WRITE(HSW_PWR_WELL_DRIVER, 365 HSW_PWR_WELL_ENABLE_REQUEST); 366 367 if (!is_enabled) { 368 DRM_DEBUG_KMS("Enabling power well\n"); 369 if (intel_wait_for_register(dev_priv, 370 HSW_PWR_WELL_DRIVER, 371 HSW_PWR_WELL_STATE_ENABLED, 372 HSW_PWR_WELL_STATE_ENABLED, 373 20)) 374 DRM_ERROR("Timeout enabling power well\n"); 375 hsw_power_well_post_enable(dev_priv); 376 } 377 378 } else { 379 if (enable_requested) { 380 hsw_power_well_pre_disable(dev_priv); 381 I915_WRITE(HSW_PWR_WELL_DRIVER, 0); 382 POSTING_READ(HSW_PWR_WELL_DRIVER); 383 DRM_DEBUG_KMS("Requesting to disable the power well\n"); 384 } 385 } 386 } 387 388 #define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \ 389 BIT(POWER_DOMAIN_TRANSCODER_A) | \ 390 BIT(POWER_DOMAIN_PIPE_B) | \ 391 BIT(POWER_DOMAIN_TRANSCODER_B) | \ 392 BIT(POWER_DOMAIN_PIPE_C) | \ 393 BIT(POWER_DOMAIN_TRANSCODER_C) | \ 394 BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 395 BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 396 BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 397 BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 398 BIT(POWER_DOMAIN_PORT_DDI_D_LANES) | \ 399 BIT(POWER_DOMAIN_PORT_DDI_E_LANES) | \ 400 BIT(POWER_DOMAIN_AUX_B) | \ 401 BIT(POWER_DOMAIN_AUX_C) | \ 402 BIT(POWER_DOMAIN_AUX_D) | \ 403 BIT(POWER_DOMAIN_AUDIO) | \ 404 BIT(POWER_DOMAIN_VGA) | \ 405 BIT(POWER_DOMAIN_INIT)) 406 #define SKL_DISPLAY_DDI_A_E_POWER_DOMAINS ( \ 407 BIT(POWER_DOMAIN_PORT_DDI_A_LANES) | \ 408 BIT(POWER_DOMAIN_PORT_DDI_E_LANES) | \ 409 BIT(POWER_DOMAIN_INIT)) 410 #define SKL_DISPLAY_DDI_B_POWER_DOMAINS ( \ 411 BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 412 BIT(POWER_DOMAIN_INIT)) 413 #define SKL_DISPLAY_DDI_C_POWER_DOMAINS ( \ 414 BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 415 BIT(POWER_DOMAIN_INIT)) 416 #define SKL_DISPLAY_DDI_D_POWER_DOMAINS ( \ 417 BIT(POWER_DOMAIN_PORT_DDI_D_LANES) | \ 418 BIT(POWER_DOMAIN_INIT)) 419 #define SKL_DISPLAY_DC_OFF_POWER_DOMAINS ( \ 420 SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \ 421 BIT(POWER_DOMAIN_MODESET) | \ 422 BIT(POWER_DOMAIN_AUX_A) | \ 423 BIT(POWER_DOMAIN_INIT)) 424 425 #define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \ 426 BIT(POWER_DOMAIN_TRANSCODER_A) | \ 427 BIT(POWER_DOMAIN_PIPE_B) | \ 428 BIT(POWER_DOMAIN_TRANSCODER_B) | \ 429 BIT(POWER_DOMAIN_PIPE_C) | \ 430 BIT(POWER_DOMAIN_TRANSCODER_C) | \ 431 BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 432 BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 433 BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 434 BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 435 BIT(POWER_DOMAIN_AUX_B) | \ 436 BIT(POWER_DOMAIN_AUX_C) | \ 437 BIT(POWER_DOMAIN_AUDIO) | \ 438 BIT(POWER_DOMAIN_VGA) | \ 439 BIT(POWER_DOMAIN_GMBUS) | \ 440 
	BIT(POWER_DOMAIN_INIT))
#define BXT_DISPLAY_DC_OFF_POWER_DOMAINS (		\
	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT(POWER_DOMAIN_MODESET) |			\
	BIT(POWER_DOMAIN_AUX_A) |			\
	BIT(POWER_DOMAIN_INIT))
#define BXT_DPIO_CMN_A_POWER_DOMAINS (			\
	BIT(POWER_DOMAIN_PORT_DDI_A_LANES) |		\
	BIT(POWER_DOMAIN_AUX_A) |			\
	BIT(POWER_DOMAIN_INIT))
#define BXT_DPIO_CMN_BC_POWER_DOMAINS (			\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT(POWER_DOMAIN_AUX_B) |			\
	BIT(POWER_DOMAIN_AUX_C) |			\
	BIT(POWER_DOMAIN_INIT))

static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
{
	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
		  "DC9 already programmed to be enabled.\n");
	WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
		  "DC5 still not disabled to enable DC9.\n");
	WARN_ONCE(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on.\n");
	WARN_ONCE(intel_irqs_enabled(dev_priv),
		  "Interrupts not disabled yet.\n");

	/*
	 * TODO: check for the following to verify the conditions to enter DC9
	 * state are satisfied:
	 * 1] Check relevant display engine registers to verify if mode set
	 *    disable sequence was followed.
	 * 2] Check if display uninitialize sequence is initialized.
	 */
}

static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(intel_irqs_enabled(dev_priv),
		  "Interrupts not disabled yet.\n");
	WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
		  "DC5 still not disabled.\n");

	/*
	 * TODO: check for the following to verify DC9 state was indeed
	 * entered before programming to disable it:
	 * 1] Check relevant display engine registers to verify if mode
	 *    set disable sequence was followed.
	 * 2] Check if display uninitialize sequence is initialized.
	 */
}

static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
				u32 state)
{
	int rewrites = 0;
	int rereads = 0;
	u32 v;

	I915_WRITE(DC_STATE_EN, state);

	/*
	 * It has been observed that disabling the dc6 state sometimes
	 * doesn't stick and the DMC keeps returning the old value. Make
	 * sure the write really sticks by re-reading enough times, and
	 * force a rewrite until we are confident that the state is
	 * exactly what we want.
505 */ 506 do { 507 v = I915_READ(DC_STATE_EN); 508 509 if (v != state) { 510 I915_WRITE(DC_STATE_EN, state); 511 rewrites++; 512 rereads = 0; 513 } else if (rereads++ > 5) { 514 break; 515 } 516 517 } while (rewrites < 100); 518 519 if (v != state) 520 DRM_ERROR("Writing dc state to 0x%x failed, now 0x%x\n", 521 state, v); 522 523 /* Most of the times we need one retry, avoid spam */ 524 if (rewrites > 1) 525 DRM_DEBUG_KMS("Rewrote dc state to 0x%x %d times\n", 526 state, rewrites); 527 } 528 529 static u32 gen9_dc_mask(struct drm_i915_private *dev_priv) 530 { 531 u32 mask; 532 533 mask = DC_STATE_EN_UPTO_DC5; 534 if (IS_BROXTON(dev_priv)) 535 mask |= DC_STATE_EN_DC9; 536 else 537 mask |= DC_STATE_EN_UPTO_DC6; 538 539 return mask; 540 } 541 542 void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv) 543 { 544 u32 val; 545 546 val = I915_READ(DC_STATE_EN) & gen9_dc_mask(dev_priv); 547 548 DRM_DEBUG_KMS("Resetting DC state tracking from %02x to %02x\n", 549 dev_priv->csr.dc_state, val); 550 dev_priv->csr.dc_state = val; 551 } 552 553 static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state) 554 { 555 uint32_t val; 556 uint32_t mask; 557 558 if (WARN_ON_ONCE(state & ~dev_priv->csr.allowed_dc_mask)) 559 state &= dev_priv->csr.allowed_dc_mask; 560 561 val = I915_READ(DC_STATE_EN); 562 mask = gen9_dc_mask(dev_priv); 563 DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n", 564 val & mask, state); 565 566 /* Check if DMC is ignoring our DC state requests */ 567 if ((val & mask) != dev_priv->csr.dc_state) 568 DRM_ERROR("DC state mismatch (0x%x -> 0x%x)\n", 569 dev_priv->csr.dc_state, val & mask); 570 571 val &= ~mask; 572 val |= state; 573 574 gen9_write_dc_state(dev_priv, val); 575 576 dev_priv->csr.dc_state = val & mask; 577 } 578 579 void bxt_enable_dc9(struct drm_i915_private *dev_priv) 580 { 581 assert_can_enable_dc9(dev_priv); 582 583 DRM_DEBUG_KMS("Enabling DC9\n"); 584 585 intel_power_sequencer_reset(dev_priv); 586 gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9); 587 } 588 589 void bxt_disable_dc9(struct drm_i915_private *dev_priv) 590 { 591 assert_can_disable_dc9(dev_priv); 592 593 DRM_DEBUG_KMS("Disabling DC9\n"); 594 595 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); 596 597 intel_pps_unlock_regs_wa(dev_priv); 598 } 599 600 static void assert_csr_loaded(struct drm_i915_private *dev_priv) 601 { 602 WARN_ONCE(!I915_READ(CSR_PROGRAM(0)), 603 "CSR program storage start is NULL\n"); 604 WARN_ONCE(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n"); 605 WARN_ONCE(!I915_READ(CSR_HTP_SKL), "CSR HTP Not fine\n"); 606 } 607 608 static void assert_can_enable_dc5(struct drm_i915_private *dev_priv) 609 { 610 bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv, 611 SKL_DISP_PW_2); 612 613 WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n"); 614 615 WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5), 616 "DC5 already programmed to be enabled.\n"); 617 assert_rpm_wakelock_held(dev_priv); 618 619 assert_csr_loaded(dev_priv); 620 } 621 622 void gen9_enable_dc5(struct drm_i915_private *dev_priv) 623 { 624 assert_can_enable_dc5(dev_priv); 625 626 DRM_DEBUG_KMS("Enabling DC5\n"); 627 628 gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5); 629 } 630 631 static void assert_can_enable_dc6(struct drm_i915_private *dev_priv) 632 { 633 WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE, 634 "Backlight is not disabled.\n"); 635 WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6), 636 "DC6 already programmed to be enabled.\n"); 637 638 
	assert_csr_loaded(dev_priv);
}

void skl_enable_dc6(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc6(dev_priv);

	DRM_DEBUG_KMS("Enabling DC6\n");

	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
}

void skl_disable_dc6(struct drm_i915_private *dev_priv)
{
	DRM_DEBUG_KMS("Disabling DC6\n");

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
}

static void
gen9_sanitize_power_well_requests(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	enum skl_disp_power_wells power_well_id = power_well->data;
	u32 val;
	u32 mask;

	mask = SKL_POWER_WELL_REQ(power_well_id);

	val = I915_READ(HSW_PWR_WELL_KVMR);
	if (WARN_ONCE(val & mask, "Clearing unexpected KVMR request for %s\n",
		      power_well->name))
		I915_WRITE(HSW_PWR_WELL_KVMR, val & ~mask);

	val = I915_READ(HSW_PWR_WELL_BIOS);
	val |= I915_READ(HSW_PWR_WELL_DEBUG);

	if (!(val & mask))
		return;

	/*
	 * DMC is known to force on the request bits for power well 1 on SKL
	 * and BXT and the misc IO power well on SKL but we don't expect any
	 * other request bits to be set, so WARN for those.
	 */
	if (power_well_id == SKL_DISP_PW_1 ||
	    ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
	     power_well_id == SKL_DISP_PW_MISC_IO))
		DRM_DEBUG_DRIVER("Clearing auxiliary requests for %s forced on "
				 "by DMC\n", power_well->name);
	else
		WARN_ONCE(1, "Clearing unexpected auxiliary requests for %s\n",
			  power_well->name);

	I915_WRITE(HSW_PWR_WELL_BIOS, val & ~mask);
	I915_WRITE(HSW_PWR_WELL_DEBUG, val & ~mask);
}

static void skl_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	uint32_t tmp, fuse_status;
	uint32_t req_mask, state_mask;
	bool is_enabled, enable_requested, check_fuse_status = false;

	tmp = I915_READ(HSW_PWR_WELL_DRIVER);
	fuse_status = I915_READ(SKL_FUSE_STATUS);

	switch (power_well->data) {
	case SKL_DISP_PW_1:
		if (intel_wait_for_register(dev_priv,
					    SKL_FUSE_STATUS,
					    SKL_FUSE_PG0_DIST_STATUS,
					    SKL_FUSE_PG0_DIST_STATUS,
					    1)) {
			DRM_ERROR("PG0 not enabled\n");
			return;
		}
		break;
	case SKL_DISP_PW_2:
		if (!(fuse_status & SKL_FUSE_PG1_DIST_STATUS)) {
			DRM_ERROR("PG1 in disabled state\n");
			return;
		}
		break;
	case SKL_DISP_PW_DDI_A_E:
	case SKL_DISP_PW_DDI_B:
	case SKL_DISP_PW_DDI_C:
	case SKL_DISP_PW_DDI_D:
	case SKL_DISP_PW_MISC_IO:
		break;
	default:
		WARN(1, "Unknown power well %lu\n", power_well->data);
		return;
	}

	req_mask = SKL_POWER_WELL_REQ(power_well->data);
	enable_requested = tmp & req_mask;
	state_mask = SKL_POWER_WELL_STATE(power_well->data);
	is_enabled = tmp & state_mask;

	if (!enable && enable_requested)
		skl_power_well_pre_disable(dev_priv, power_well);

	if (enable) {
		if (!enable_requested) {
			WARN((tmp & state_mask) &&
			     !I915_READ(HSW_PWR_WELL_BIOS),
			     "Invalid for power well status to be enabled, unless done by the BIOS, "
			     "when request is to disable!\n");
			I915_WRITE(HSW_PWR_WELL_DRIVER, tmp | req_mask);
		}

		if (!is_enabled) {
			DRM_DEBUG_KMS("Enabling %s\n", power_well->name);
			check_fuse_status = true;
		}
	} else {
		if (enable_requested) {
			I915_WRITE(HSW_PWR_WELL_DRIVER, tmp & ~req_mask);
			POSTING_READ(HSW_PWR_WELL_DRIVER);
			DRM_DEBUG_KMS("Disabling %s\n",
				      power_well->name);
		}

		if (IS_GEN9(dev_priv))
			gen9_sanitize_power_well_requests(dev_priv, power_well);
	}

	if (wait_for(!!(I915_READ(HSW_PWR_WELL_DRIVER) & state_mask) == enable,
		     1))
		DRM_ERROR("%s %s timeout\n",
			  power_well->name, enable ? "enable" : "disable");

	if (check_fuse_status) {
		if (power_well->data == SKL_DISP_PW_1) {
			if (intel_wait_for_register(dev_priv,
						    SKL_FUSE_STATUS,
						    SKL_FUSE_PG1_DIST_STATUS,
						    SKL_FUSE_PG1_DIST_STATUS,
						    1))
				DRM_ERROR("PG1 distributing status timeout\n");
		} else if (power_well->data == SKL_DISP_PW_2) {
			if (intel_wait_for_register(dev_priv,
						    SKL_FUSE_STATUS,
						    SKL_FUSE_PG2_DIST_STATUS,
						    SKL_FUSE_PG2_DIST_STATUS,
						    1))
				DRM_ERROR("PG2 distributing status timeout\n");
		}
	}

	if (enable && !is_enabled)
		skl_power_well_post_enable(dev_priv, power_well);
}

static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, power_well->count > 0);

	/*
	 * We're taking over the BIOS, so clear any requests made by it since
	 * the driver is in charge now.
	 */
	if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
		I915_WRITE(HSW_PWR_WELL_BIOS, 0);
}

static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, true);
}

static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, false);
}

static bool skl_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	uint32_t mask = SKL_POWER_WELL_REQ(power_well->data) |
			SKL_POWER_WELL_STATE(power_well->data);

	return (I915_READ(HSW_PWR_WELL_DRIVER) & mask) == mask;
}

static void skl_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	skl_set_power_well(dev_priv, power_well, power_well->count > 0);

	/* Clear any request made by BIOS as driver is taking over */
	I915_WRITE(HSW_PWR_WELL_BIOS, 0);
}

static void skl_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	skl_set_power_well(dev_priv, power_well, true);
}

static void skl_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	skl_set_power_well(dev_priv, power_well, false);
}

static enum dpio_phy bxt_power_well_to_phy(struct i915_power_well *power_well)
{
	enum skl_disp_power_wells power_well_id = power_well->data;

	return power_well_id == BXT_DPIO_CMN_A ? DPIO_PHY1 : DPIO_PHY0;
}

static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	enum skl_disp_power_wells power_well_id = power_well->data;
	struct i915_power_well *cmn_a_well = NULL;

	if (power_well_id == BXT_DPIO_CMN_BC) {
		/*
		 * We need to copy the GRC calibration value from the eDP PHY,
		 * so make sure it's powered up.
866 */ 867 cmn_a_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_A); 868 intel_power_well_get(dev_priv, cmn_a_well); 869 } 870 871 bxt_ddi_phy_init(dev_priv, bxt_power_well_to_phy(power_well)); 872 873 if (cmn_a_well) 874 intel_power_well_put(dev_priv, cmn_a_well); 875 } 876 877 static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv, 878 struct i915_power_well *power_well) 879 { 880 bxt_ddi_phy_uninit(dev_priv, bxt_power_well_to_phy(power_well)); 881 } 882 883 static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv, 884 struct i915_power_well *power_well) 885 { 886 return bxt_ddi_phy_is_enabled(dev_priv, 887 bxt_power_well_to_phy(power_well)); 888 } 889 890 static void bxt_dpio_cmn_power_well_sync_hw(struct drm_i915_private *dev_priv, 891 struct i915_power_well *power_well) 892 { 893 if (power_well->count > 0) 894 bxt_dpio_cmn_power_well_enable(dev_priv, power_well); 895 else 896 bxt_dpio_cmn_power_well_disable(dev_priv, power_well); 897 } 898 899 900 static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv) 901 { 902 struct i915_power_well *power_well; 903 904 power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_A); 905 if (power_well->count > 0) 906 bxt_ddi_phy_verify_state(dev_priv, 907 bxt_power_well_to_phy(power_well)); 908 909 power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_BC); 910 if (power_well->count > 0) 911 bxt_ddi_phy_verify_state(dev_priv, 912 bxt_power_well_to_phy(power_well)); 913 } 914 915 static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv, 916 struct i915_power_well *power_well) 917 { 918 return (I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0; 919 } 920 921 static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv) 922 { 923 u32 tmp = I915_READ(DBUF_CTL); 924 925 WARN((tmp & (DBUF_POWER_STATE | DBUF_POWER_REQUEST)) != 926 (DBUF_POWER_STATE | DBUF_POWER_REQUEST), 927 "Unexpected DBuf power power state (0x%08x)\n", tmp); 928 } 929 930 static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv, 931 struct i915_power_well *power_well) 932 { 933 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); 934 935 WARN_ON(dev_priv->cdclk_freq != 936 dev_priv->display.get_display_clock_speed(&dev_priv->drm)); 937 938 gen9_assert_dbuf_enabled(dev_priv); 939 940 if (IS_BROXTON(dev_priv)) 941 bxt_verify_ddi_phy_power_wells(dev_priv); 942 } 943 944 static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv, 945 struct i915_power_well *power_well) 946 { 947 if (!dev_priv->csr.dmc_payload) 948 return; 949 950 if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC6) 951 skl_enable_dc6(dev_priv); 952 else if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5) 953 gen9_enable_dc5(dev_priv); 954 } 955 956 static void gen9_dc_off_power_well_sync_hw(struct drm_i915_private *dev_priv, 957 struct i915_power_well *power_well) 958 { 959 if (power_well->count > 0) 960 gen9_dc_off_power_well_enable(dev_priv, power_well); 961 else 962 gen9_dc_off_power_well_disable(dev_priv, power_well); 963 } 964 965 static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv, 966 struct i915_power_well *power_well) 967 { 968 } 969 970 static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv, 971 struct i915_power_well *power_well) 972 { 973 return true; 974 } 975 976 static void vlv_set_power_well(struct drm_i915_private *dev_priv, 977 struct i915_power_well *power_well, bool enable) 978 { 979 enum punit_power_well 
	enum punit_power_well power_well_id = power_well->data;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(power_well_id);
	state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
			 PUNIT_PWRGT_PWR_GATE(power_well_id);

	mutex_lock(&dev_priv->rps.hw_lock);

#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)

	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
	ctrl &= ~mask;
	ctrl |= state;
	vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);

	if (wait_for(COND, 100))
		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
			  state,
			  vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));

#undef COND

out:
	mutex_unlock(&dev_priv->rps.hw_lock);
}

static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, power_well->count > 0);
}

static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, true);
}

static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, false);
}

static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	int power_well_id = power_well->data;
	bool enabled = false;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(power_well_id);
	ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);

	mutex_lock(&dev_priv->rps.hw_lock);

	state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) &&
		state != PUNIT_PWRGT_PWR_GATE(power_well_id));
	if (state == ctrl)
		enabled = true;

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
	WARN_ON(ctrl != state);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return enabled;
}

static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
{
	I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);

	/*
	 * Disable trickle feed and enable pnd deadline calculation
	 */
	I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
	I915_WRITE(CBR1_VLV, 0);

	WARN_ON(dev_priv->rawclk_freq == 0);

	I915_WRITE(RAWCLK_FREQ_VLV,
		   DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 1000));
}

static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	enum pipe pipe;

	/*
	 * Enable the CRI clock source so we can get at the
	 * display and the reference clock for VGA
	 * hotplug / manual detection. Supposedly DSI also
	 * needs the ref clock up and running.
	 *
	 * CHV DPLL B/C have some issues if VGA mode is enabled.
	 */
	for_each_pipe(&dev_priv->drm, pipe) {
		u32 val = I915_READ(DPLL(pipe));

		val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
		if (pipe != PIPE_A)
			val |= DPLL_INTEGRATED_CRI_CLK_VLV;

		I915_WRITE(DPLL(pipe), val);
	}

	vlv_init_display_clock_gating(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_enable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * During driver initialization/resume we can avoid restoring the
	 * part of the HW/SW state that will be explicitly initialized
	 * anyway.
	 */
	if (dev_priv->power_domains.initializing)
		return;

	intel_hpd_init(dev_priv);

	/* Re-enable the ADPA, if we have one */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		if (encoder->type == INTEL_OUTPUT_ANALOG)
			intel_crt_reset(&encoder->base);
	}

	i915_redisable_vga_power_on(&dev_priv->drm);

	intel_pps_unlock_regs_wa(dev_priv);
}

static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_disable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* make sure we're done processing display irqs */
	synchronize_irq(dev_priv->drm.irq);

	intel_power_sequencer_reset(dev_priv);

	intel_hpd_poll_init(dev_priv);
}

static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);

	vlv_set_power_well(dev_priv, power_well, true);

	vlv_display_power_well_init(dev_priv);
}

static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);

	vlv_display_power_well_deinit(dev_priv);

	vlv_set_power_well(dev_priv, power_well, false);
}

static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);

	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */

	vlv_set_power_well(dev_priv, power_well, true);

	/*
	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
	 * 6. De-assert cmn_reset/side_reset. Same as VLV X0.
	 *    a. GUnit 0x2110 bit[0] set to 1 (def 0)
	 *    b. The other bits such as sfr settings / modesel may all
	 *	 be set to 0.
	 *
	 * This should only be done on init and resume from S3 with
	 * both PLLs disabled, or we risk losing DPIO and PLL
	 * synchronization.
	 */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
}

static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum pipe pipe;

	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);

	for_each_pipe(dev_priv, pipe)
		assert_pll_disabled(dev_priv, pipe);

	/* Assert common reset */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);

	vlv_set_power_well(dev_priv, power_well, false);
}

#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)

static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
						 int power_well_id)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	int i;

	for (i = 0; i < power_domains->power_well_count; i++) {
		struct i915_power_well *power_well;

		power_well = &power_domains->power_wells[i];
		if (power_well->data == power_well_id)
			return power_well;
	}

	return NULL;
}

#define BITS_SET(val, bits) (((val) & (bits)) == (bits))

static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D);
	u32 phy_control = dev_priv->chv_phy_control;
	u32 phy_status = 0;
	u32 phy_status_mask = 0xffffffff;

	/*
	 * The BIOS can leave the PHY in some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
	if (!dev_priv->chv_phy_assert[DPIO_PHY0])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
				     PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));

	if (!dev_priv->chv_phy_assert[DPIO_PHY1])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));

	if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY0);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);

		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);

		/* CL1 is on whenever anything is on in either channel */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);

		/*
		 * The DPLLB check accounts for the pipe B + port A usage
		 * with CL2 powered up but all the lanes in the second channel
		 * powered down.
		 */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
		    (I915_READ(DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
	}

	if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY1);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
	}

	phy_status &= phy_status_mask;

	/*
	 * The PHY may be busy with some initial calibration and whatnot,
	 * so the power state can take a while to actually change.
	 */
	if (intel_wait_for_register(dev_priv,
				    DISPLAY_PHY_STATUS,
				    phy_status_mask,
				    phy_status,
				    10))
		DRM_ERROR("Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
			  I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask,
			  phy_status, dev_priv->chv_phy_control);
}

#undef BITS_SET

static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	enum dpio_phy phy;
	enum pipe pipe;
	uint32_t tmp;

	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
		     power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);

	if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		pipe = PIPE_A;
		phy = DPIO_PHY0;
	} else {
		pipe = PIPE_C;
		phy = DPIO_PHY1;
	}

	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
	vlv_set_power_well(dev_priv, power_well, true);

	/* Poll for phypwrgood signal */
	if (intel_wait_for_register(dev_priv,
				    DISPLAY_PHY_STATUS,
				    PHY_POWERGOOD(phy),
				    PHY_POWERGOOD(phy),
				    1))
		DRM_ERROR("Display PHY %d is not powered up\n", phy);

	mutex_lock(&dev_priv->sb_lock);

	/* Enable dynamic power down */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
	tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
		DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);

	if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
		tmp |= DPIO_DYNPWRDOWNEN_CH1;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
	} else {
		/*
		 * Force the non-existing CL2 off. BXT does this
		 * too, so maybe it saves some power even though
		 * CL2 doesn't exist?
		 */
		tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
		tmp |= DPIO_CL2_LDOFUSE_PWRENB;
		vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
	}

	mutex_unlock(&dev_priv->sb_lock);

	dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		      phy, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);
}

static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum dpio_phy phy;

	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
		     power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);

	if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		phy = DPIO_PHY0;
		assert_pll_disabled(dev_priv, PIPE_A);
		assert_pll_disabled(dev_priv, PIPE_B);
	} else {
		phy = DPIO_PHY1;
		assert_pll_disabled(dev_priv, PIPE_C);
	}

	dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	vlv_set_power_well(dev_priv, power_well, false);

	DRM_DEBUG_KMS("Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		      phy, dev_priv->chv_phy_control);

	/* PHY is fully reset now, so we can enable the PHY state asserts */
	dev_priv->chv_phy_assert[phy] = true;

	assert_chv_phy_status(dev_priv);
}

static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
				     enum dpio_channel ch, bool override, unsigned int mask)
{
	enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
	u32 reg, val, expected, actual;

	/*
	 * The BIOS can leave the PHY in some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
	if (!dev_priv->chv_phy_assert[phy])
		return;

	if (ch == DPIO_CH0)
		reg = _CHV_CMN_DW0_CH0;
	else
		reg = _CHV_CMN_DW6_CH1;

	mutex_lock(&dev_priv->sb_lock);
	val = vlv_dpio_read(dev_priv, pipe, reg);
	mutex_unlock(&dev_priv->sb_lock);

	/*
	 * This assumes !override is only used when the port is disabled.
	 * All lanes should power down even without the override when
	 * the port is disabled.
	 */
	if (!override || mask == 0xf) {
		expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
		/*
		 * If CH1 common lane is not active anymore
		 * (eg. for pipe B DPLL) the entire channel will
		 * shut down, which causes the common lane registers
		 * to read as 0. That means we can't actually check
		 * the lane power down status bits, but as the entire
		 * register reads as 0 it's a good indication that the
		 * channel is indeed entirely powered down.
		 */
		if (ch == DPIO_CH1 && val == 0)
			expected = 0;
	} else if (mask != 0x0) {
		expected = DPIO_ANYDL_POWERDOWN;
	} else {
		expected = 0;
	}

	if (ch == DPIO_CH0)
		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
	else
		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
	actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;

	WARN(actual != expected,
	     "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
	     !!(actual & DPIO_ALLDL_POWERDOWN), !!(actual & DPIO_ANYDL_POWERDOWN),
	     !!(expected & DPIO_ALLDL_POWERDOWN), !!(expected & DPIO_ANYDL_POWERDOWN),
	     reg, val);
}

bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
			  enum dpio_channel ch, bool override)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	bool was_override;

	mutex_lock(&power_domains->lock);

	was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	if (override == was_override)
		goto out;

	if (override)
		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
	else
		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
		      phy, ch, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);

out:
	mutex_unlock(&power_domains->lock);

	return was_override;
}

void chv_phy_powergate_lanes(struct intel_encoder *encoder,
			     bool override, unsigned int mask)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(&encoder->base));
	enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));

	mutex_lock(&power_domains->lock);

	dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
	dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);

	if (override)
		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
	else
		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
		      phy, ch, mask, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);

	assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);

	mutex_unlock(&power_domains->lock);
}

static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	enum pipe pipe = power_well->data;
	bool enabled;
	u32 state, ctrl;

	mutex_lock(&dev_priv->rps.hw_lock);

	state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe);
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
	enabled = state == DP_SSS_PWR_ON(pipe);

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe);
	WARN_ON(ctrl << 16 != state);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return enabled;
}

static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well,
				    bool enable)
{
	enum pipe pipe = power_well->data;
	u32 state;
	u32 ctrl;

	state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);

	mutex_lock(&dev_priv->rps.hw_lock);

#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state)

	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	ctrl &= ~DP_SSC_MASK(pipe);
	ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl);

	if (wait_for(COND, 100))
		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
			  state,
			  vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ));

#undef COND

out:
	mutex_unlock(&dev_priv->rps.hw_lock);
}

static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PIPE_A);

	chv_set_pipe_power_well(dev_priv, power_well, power_well->count > 0);
}

static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
				       struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PIPE_A);

	chv_set_pipe_power_well(dev_priv, power_well, true);

	vlv_display_power_well_init(dev_priv);
}

static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PIPE_A);

	vlv_display_power_well_deinit(dev_priv);

	chv_set_pipe_power_well(dev_priv, power_well, false);
}

static void
__intel_display_power_get_domain(struct drm_i915_private *dev_priv,
				 enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;
	int i;

	for_each_power_well(i, power_well, BIT(domain), power_domains)
		intel_power_well_get(dev_priv, power_well);

	power_domains->domain_use_count[domain]++;
}

/**
 * intel_display_power_get - grab a power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function grabs a power domain reference for @domain and ensures that the
 * power domain and all its parents are powered up. Therefore users should only
 * grab a reference to the innermost power domain they need.
 *
 * Any power domain reference obtained by this function must have a symmetric
 * call to intel_display_power_put() to release the reference again.
 */
void intel_display_power_get(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	intel_runtime_pm_get(dev_priv);

	mutex_lock(&power_domains->lock);

	__intel_display_power_get_domain(dev_priv, domain);

	mutex_unlock(&power_domains->lock);
}

/**
 * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function grabs a power domain reference for @domain only if the domain
 * is already enabled, and in that case ensures that the power domain and all
 * its parents stay powered up. Therefore users should only grab a reference to
 * the innermost power domain they need.
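 *
 * A minimal usage sketch (the calling context shown here is hypothetical):
 *
 *	if (intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PIPE_A)) {
 *		... read pipe A registers without waking up the hardware ...
 *		intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
 *	}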
 *
 * Any power domain reference obtained by this function must have a symmetric
 * call to intel_display_power_put() to release the reference again.
 *
 * Returns:
 * True when the reference was grabbed, false otherwise.
 */
bool intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
					enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	bool is_enabled;

	if (!intel_runtime_pm_get_if_in_use(dev_priv))
		return false;

	mutex_lock(&power_domains->lock);

	if (__intel_display_power_is_enabled(dev_priv, domain)) {
		__intel_display_power_get_domain(dev_priv, domain);
		is_enabled = true;
	} else {
		is_enabled = false;
	}

	mutex_unlock(&power_domains->lock);

	if (!is_enabled)
		intel_runtime_pm_put(dev_priv);

	return is_enabled;
}

/**
 * intel_display_power_put - release a power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to release the reference for
 *
 * This function drops the power domain reference obtained by
 * intel_display_power_get() and might power down the corresponding hardware
 * block right away if this is the last reference.
 */
void intel_display_power_put(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	int i;

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);

	WARN(!power_domains->domain_use_count[domain],
	     "Use count on domain %s is already zero\n",
	     intel_display_power_domain_str(domain));
	power_domains->domain_use_count[domain]--;

	for_each_power_well_rev(i, power_well, BIT(domain), power_domains)
		intel_power_well_put(dev_priv, power_well);

	mutex_unlock(&power_domains->lock);

	intel_runtime_pm_put(dev_priv);
}
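
/*
 * Power well / power domain tables. Each power well below lists, as a
 * bitmask, every power domain it serves; a well stays enabled as long as any
 * of the domains mapped to it holds a nonzero reference count (see
 * intel_power_well_get()/intel_power_well_put() above). This mapping is what
 * keeps the generic get/put code platform-agnostic.
 */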
#define HSW_DISPLAY_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PIPE_B) |		\
	BIT(POWER_DOMAIN_PIPE_C) |		\
	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT(POWER_DOMAIN_TRANSCODER_C) |	\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT(POWER_DOMAIN_PORT_CRT) | /* DDI E */\
	BIT(POWER_DOMAIN_VGA) |			\
	BIT(POWER_DOMAIN_AUDIO) |		\
	BIT(POWER_DOMAIN_INIT))

#define BDW_DISPLAY_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PIPE_B) |		\
	BIT(POWER_DOMAIN_PIPE_C) |		\
	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT(POWER_DOMAIN_TRANSCODER_C) |	\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT(POWER_DOMAIN_PORT_CRT) | /* DDI E */\
	BIT(POWER_DOMAIN_VGA) |			\
	BIT(POWER_DOMAIN_AUDIO) |		\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DISPLAY_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PIPE_A) |		\
	BIT(POWER_DOMAIN_PIPE_B) |		\
	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DSI) |		\
	BIT(POWER_DOMAIN_PORT_CRT) |		\
	BIT(POWER_DOMAIN_VGA) |			\
	BIT(POWER_DOMAIN_AUDIO) |		\
	BIT(POWER_DOMAIN_AUX_B) |		\
	BIT(POWER_DOMAIN_AUX_C) |		\
	BIT(POWER_DOMAIN_GMBUS) |		\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_CMN_BC_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT(POWER_DOMAIN_PORT_CRT) |		\
	BIT(POWER_DOMAIN_AUX_B) |		\
	BIT(POWER_DOMAIN_AUX_C) |		\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT(POWER_DOMAIN_AUX_B) |		\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT(POWER_DOMAIN_AUX_B) |		\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT(POWER_DOMAIN_AUX_C) |		\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT(POWER_DOMAIN_AUX_C) |		\
	BIT(POWER_DOMAIN_INIT))

#define CHV_DISPLAY_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PIPE_A) |		\
	BIT(POWER_DOMAIN_PIPE_B) |		\
	BIT(POWER_DOMAIN_PIPE_C) |		\
	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT(POWER_DOMAIN_TRANSCODER_C) |	\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DSI) |		\
	BIT(POWER_DOMAIN_VGA) |			\
	BIT(POWER_DOMAIN_AUDIO) |		\
	BIT(POWER_DOMAIN_AUX_B) |		\
	BIT(POWER_DOMAIN_AUX_C) |		\
	BIT(POWER_DOMAIN_AUX_D) |		\
	BIT(POWER_DOMAIN_GMBUS) |		\
	BIT(POWER_DOMAIN_INIT))

#define CHV_DPIO_CMN_BC_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT(POWER_DOMAIN_AUX_B) |		\
	BIT(POWER_DOMAIN_AUX_C) |		\
	BIT(POWER_DOMAIN_INIT))

#define CHV_DPIO_CMN_D_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT(POWER_DOMAIN_AUX_D) |		\
	BIT(POWER_DOMAIN_INIT))

static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
	.sync_hw = i9xx_always_on_power_well_noop,
	.enable = i9xx_always_on_power_well_noop,
	.disable = i9xx_always_on_power_well_noop,
	.is_enabled = i9xx_always_on_power_well_enabled,
};

static const struct i915_power_well_ops chv_pipe_power_well_ops = {
	.sync_hw = chv_pipe_power_well_sync_hw,
	.enable = chv_pipe_power_well_enable,
	.disable = chv_pipe_power_well_disable,
	.is_enabled = chv_pipe_power_well_enabled,
};

static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = chv_dpio_cmn_power_well_enable,
	.disable = chv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static struct i915_power_well i9xx_always_on_power_well[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
	},
};
static const struct i915_power_well_ops hsw_power_well_ops = {
	.sync_hw = hsw_power_well_sync_hw,
	.enable = hsw_power_well_enable,
	.disable = hsw_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};

static const struct i915_power_well_ops skl_power_well_ops = {
	.sync_hw = skl_power_well_sync_hw,
	.enable = skl_power_well_enable,
	.disable = skl_power_well_disable,
	.is_enabled = skl_power_well_enabled,
};

static const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
	.sync_hw = gen9_dc_off_power_well_sync_hw,
	.enable = gen9_dc_off_power_well_enable,
	.disable = gen9_dc_off_power_well_disable,
	.is_enabled = gen9_dc_off_power_well_enabled,
};

static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = {
	.sync_hw = bxt_dpio_cmn_power_well_sync_hw,
	.enable = bxt_dpio_cmn_power_well_enable,
	.disable = bxt_dpio_cmn_power_well_disable,
	.is_enabled = bxt_dpio_cmn_power_well_enabled,
};

static struct i915_power_well hsw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = HSW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
	},
};

static struct i915_power_well bdw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = BDW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
	},
};

static const struct i915_power_well_ops vlv_display_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_display_power_well_enable,
	.disable = vlv_display_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_dpio_cmn_power_well_enable,
	.disable = vlv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_power_well_enable,
	.disable = vlv_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static struct i915_power_well vlv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.data = PUNIT_POWER_WELL_ALWAYS_ON,
	},
	{
		.name = "display",
		.domains = VLV_DISPLAY_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DISP2D,
		.ops = &vlv_display_power_well_ops,
	},
	{
		.name = "dpio-tx-b-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
	},
	{
		.name = "dpio-tx-b-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
	},
	{
		.name = "dpio-tx-c-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
	},
	{
		.name = "dpio-tx-c-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
	},
	{
		.name = "dpio-common",
		.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DPIO_CMN_BC,
		.ops = &vlv_dpio_cmn_power_well_ops,
	},
};

static struct i915_power_well chv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		/*
		 * Pipe A power well is the new disp2d well. Pipe B and C
		 * power wells don't actually exist. Pipe A power well is
		 * required for any pipe to work.
		 */
		.domains = CHV_DISPLAY_POWER_DOMAINS,
		.data = PIPE_A,
		.ops = &chv_pipe_power_well_ops,
	},
	{
		.name = "dpio-common-bc",
		.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DPIO_CMN_BC,
		.ops = &chv_dpio_cmn_power_well_ops,
	},
	{
		.name = "dpio-common-d",
		.domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DPIO_CMN_D,
		.ops = &chv_dpio_cmn_power_well_ops,
	},
};

bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
					 int power_well_id)
{
	struct i915_power_well *power_well;
	bool ret;

	power_well = lookup_power_well(dev_priv, power_well_id);
	ret = power_well->ops->is_enabled(dev_priv, power_well);

	return ret;
}
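/*
 * Illustrative sketch, not part of the original driver: platform code can
 * query one specific well by its id with the helper above, bypassing the
 * domain indirection. The check below is hypothetical and merely shows the
 * calling convention.
 */
static void __maybe_unused example_check_pw2(struct drm_i915_private *dev_priv)
{
	if (intel_display_power_well_is_enabled(dev_priv, SKL_DISP_PW_2))
		DRM_DEBUG_KMS("power well 2 is up\n");
}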
static struct i915_power_well skl_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.data = SKL_DISP_PW_ALWAYS_ON,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.domains = 0,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_1,
	},
	{
		.name = "MISC IO power well",
		/* Handled by the DMC firmware */
		.domains = 0,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_MISC_IO,
	},
	{
		.name = "DC off",
		.domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.data = SKL_DISP_PW_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_2,
	},
	{
		.name = "DDI A/E power well",
		.domains = SKL_DISPLAY_DDI_A_E_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_DDI_A_E,
	},
	{
		.name = "DDI B power well",
		.domains = SKL_DISPLAY_DDI_B_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_DDI_B,
	},
	{
		.name = "DDI C power well",
		.domains = SKL_DISPLAY_DDI_C_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_DDI_C,
	},
	{
		.name = "DDI D power well",
		.domains = SKL_DISPLAY_DDI_D_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_DDI_D,
	},
};

static struct i915_power_well bxt_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "power well 1",
		.domains = 0,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_1,
	},
	{
		.name = "DC off",
		.domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.data = SKL_DISP_PW_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_2,
	},
	{
		.name = "dpio-common-a",
		.domains = BXT_DPIO_CMN_A_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.data = BXT_DPIO_CMN_A,
	},
	{
		.name = "dpio-common-bc",
		.domains = BXT_DPIO_CMN_BC_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.data = BXT_DPIO_CMN_BC,
	},
};

static int
sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
				   int disable_power_well)
{
	if (disable_power_well >= 0)
		return !!disable_power_well;

	return 1;
}

static uint32_t get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
				    int enable_dc)
{
	uint32_t mask;
	int requested_dc;
	int max_dc;

	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
		max_dc = 2;
		mask = 0;
	} else if (IS_BROXTON(dev_priv)) {
		max_dc = 1;
		/*
		 * DC9 has a separate HW flow from the rest of the DC states,
		 * not depending on the DMC firmware. It's needed by system
		 * suspend/resume, so allow it unconditionally.
		 */
		mask = DC_STATE_EN_DC9;
	} else {
		max_dc = 0;
		mask = 0;
	}

	if (!i915.disable_power_well)
		max_dc = 0;

	if (enable_dc >= 0 && enable_dc <= max_dc) {
		requested_dc = enable_dc;
	} else if (enable_dc == -1) {
		requested_dc = max_dc;
	} else if (enable_dc > max_dc && enable_dc <= 2) {
		DRM_DEBUG_KMS("Adjusting requested max DC state (%d->%d)\n",
			      enable_dc, max_dc);
		requested_dc = max_dc;
	} else {
		DRM_ERROR("Unexpected value for enable_dc (%d)\n", enable_dc);
		requested_dc = max_dc;
	}

	if (requested_dc > 1)
		mask |= DC_STATE_EN_UPTO_DC6;
	if (requested_dc > 0)
		mask |= DC_STATE_EN_UPTO_DC5;

	DRM_DEBUG_KMS("Allowed DC state mask %02x\n", mask);

	return mask;
}
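/*
 * Illustrative worked case for the function above, assuming power well
 * support is left enabled (i915.disable_power_well != 0): on SKL/KBL with
 * the default i915.enable_dc=-1, requested_dc becomes max_dc = 2, so the
 * returned mask is DC_STATE_EN_UPTO_DC5 | DC_STATE_EN_UPTO_DC6. On BXT
 * max_dc = 1 and the result is DC_STATE_EN_DC9 | DC_STATE_EN_UPTO_DC5.
 */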
#define set_power_wells(power_domains, __power_wells) ({		\
	(power_domains)->power_wells = (__power_wells);			\
	(power_domains)->power_well_count = ARRAY_SIZE(__power_wells);	\
})

/**
 * intel_power_domains_init - initializes the power domain structures
 * @dev_priv: i915 device instance
 *
 * Initializes the power domain structures for @dev_priv depending upon the
 * supported platform.
 */
int intel_power_domains_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	i915.disable_power_well = sanitize_disable_power_well_option(dev_priv,
						     i915.disable_power_well);
	dev_priv->csr.allowed_dc_mask = get_allowed_dc_mask(dev_priv,
							    i915.enable_dc);

	BUILD_BUG_ON(POWER_DOMAIN_NUM > 31);

	mutex_init(&power_domains->lock);

	/*
	 * The enabling order will be from lower to higher indexed wells,
	 * the disabling order is reversed.
	 */
	if (IS_HASWELL(dev_priv)) {
		set_power_wells(power_domains, hsw_power_wells);
	} else if (IS_BROADWELL(dev_priv)) {
		set_power_wells(power_domains, bdw_power_wells);
	} else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
		set_power_wells(power_domains, skl_power_wells);
	} else if (IS_BROXTON(dev_priv)) {
		set_power_wells(power_domains, bxt_power_wells);
	} else if (IS_CHERRYVIEW(dev_priv)) {
		set_power_wells(power_domains, chv_power_wells);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		set_power_wells(power_domains, vlv_power_wells);
	} else {
		set_power_wells(power_domains, i9xx_always_on_power_well);
	}

	return 0;
}
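/*
 * Illustrative sketch of the assumed driver-load ordering (the function
 * below and its error handling are hypothetical; the call sequence is what
 * the kernel-doc in this file describes): structures first, then hardware
 * state, with runtime pm enabled at the very end of loading.
 */
static void __maybe_unused example_load_sequence(struct drm_i915_private *dev_priv)
{
	intel_power_domains_init(dev_priv);		/* sw structures */
	intel_power_domains_init_hw(dev_priv, false);	/* sync with hw */
	/* ... rest of driver load ... */
	intel_runtime_pm_enable(dev_priv);		/* end of load */
}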
/**
 * intel_power_domains_fini - finalizes the power domain structures
 * @dev_priv: i915 device instance
 *
 * Finalizes the power domain structures for @dev_priv depending upon the
 * supported platform. This function also disables runtime pm and ensures that
 * the device stays powered up so that the driver can be reloaded.
 */
void intel_power_domains_fini(struct drm_i915_private *dev_priv)
{
	struct device *kdev = &dev_priv->drm.pdev->dev;

	/*
	 * The i915.ko module is still not prepared to be loaded when
	 * the power well is not enabled, so just enable it in case
	 * we're going to unload/reload.
	 * The following also reacquires the RPM reference the core passed
	 * to the driver during loading, which is dropped in
	 * intel_runtime_pm_enable(). We have to hand back the control of the
	 * device to the core with this reference held.
	 */
	intel_display_set_init_power(dev_priv, true);

	/* Remove the refcount we took to keep power well support disabled. */
	if (!i915.disable_power_well)
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

	/*
	 * Remove the refcount we took in intel_runtime_pm_enable() in case
	 * the platform doesn't support runtime PM.
	 */
	if (!HAS_RUNTIME_PM(dev_priv))
		pm_runtime_put(kdev);
}

static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;
	int i;

	mutex_lock(&power_domains->lock);
	for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
		power_well->ops->sync_hw(dev_priv, power_well);
		power_well->hw_enabled = power_well->ops->is_enabled(dev_priv,
								     power_well);
	}
	mutex_unlock(&power_domains->lock);
}

static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
{
	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
	POSTING_READ(DBUF_CTL);

	udelay(10);

	if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
		DRM_ERROR("DBuf power enable timeout\n");
}

static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
{
	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
	POSTING_READ(DBUF_CTL);

	udelay(10);

	if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
		DRM_ERROR("DBuf power disable timeout\n");
}

static void skl_display_core_init(struct drm_i915_private *dev_priv,
				  bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;
	uint32_t val;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* enable PCH reset handshake */
	val = I915_READ(HSW_NDE_RSTWRN_OPT);
	I915_WRITE(HSW_NDE_RSTWRN_OPT, val | RESET_PCH_HANDSHAKE_ENABLE);

	/* enable PG1 and Misc I/O */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
	intel_power_well_enable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	skl_init_cdclk(dev_priv);

	gen9_dbuf_enable(dev_priv);

	if (resume && dev_priv->csr.dmc_payload)
		intel_csr_load_program(dev_priv);
}

static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	gen9_dbuf_disable(dev_priv);

	skl_uninit_cdclk(dev_priv);

	/* The spec doesn't call for removing the reset handshake flag */
	/* disable PG1 and Misc I/O */

	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
	intel_power_well_disable(dev_priv, well);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);

	mutex_unlock(&power_domains->lock);
}
void bxt_display_core_init(struct drm_i915_private *dev_priv,
			   bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;
	uint32_t val;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/*
	 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
	 * or else the reset will hang because there is no PCH to respond.
	 * Move the handshake programming to initialization sequence.
	 * Previously was left up to BIOS.
	 */
	val = I915_READ(HSW_NDE_RSTWRN_OPT);
	val &= ~RESET_PCH_HANDSHAKE_ENABLE;
	I915_WRITE(HSW_NDE_RSTWRN_OPT, val);

	/* Enable PG1 */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	bxt_init_cdclk(dev_priv);

	gen9_dbuf_enable(dev_priv);

	if (resume && dev_priv->csr.dmc_payload)
		intel_csr_load_program(dev_priv);
}

void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	gen9_dbuf_disable(dev_priv);

	bxt_uninit_cdclk(dev_priv);

	/* The spec doesn't call for removing the reset handshake flag */

	/* Disable PG1 */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);

	mutex_unlock(&power_domains->lock);
}

static void chv_phy_control_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D);

	/*
	 * DISPLAY_PHY_CONTROL can get corrupted if read. As a
	 * workaround never ever read DISPLAY_PHY_CONTROL, and
	 * instead maintain a shadow copy ourselves. Use the actual
	 * power well state and lane status to reconstruct the
	 * expected initial value.
	 */
	dev_priv->chv_phy_control =
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);
	/*
	 * If all lanes are disabled we leave the override disabled
	 * with all power down bits cleared to match the state we
	 * would use after disabling the port. Otherwise enable the
	 * override and set the lane powerdown bits according to the
	 * current lane status.
	 */
	if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) {
		uint32_t status = I915_READ(DPLL(PIPE_A));
		unsigned int mask;

		mask = status & DPLL_PORTB_READY_MASK;
		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);

		mask = (status & DPLL_PORTC_READY_MASK) >> 4;
		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);

		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);

		dev_priv->chv_phy_assert[DPIO_PHY0] = false;
	} else {
		dev_priv->chv_phy_assert[DPIO_PHY0] = true;
	}

	if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) {
		uint32_t status = I915_READ(DPIO_PHY_STATUS);
		unsigned int mask;

		mask = status & DPLL_PORTD_READY_MASK;

		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);

		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);

		dev_priv->chv_phy_assert[DPIO_PHY1] = false;
	} else {
		dev_priv->chv_phy_assert[DPIO_PHY1] = true;
	}

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Initial PHY_CONTROL=0x%08x\n",
		      dev_priv->chv_phy_control);
}

static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
	struct i915_power_well *disp2d =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D);

	/* If the display might be already active skip this */
	if (cmn->ops->is_enabled(dev_priv, cmn) &&
	    disp2d->ops->is_enabled(dev_priv, disp2d) &&
	    I915_READ(DPIO_CTL) & DPIO_CMNRST)
		return;

	DRM_DEBUG_KMS("toggling display PHY side reset\n");

	/* cmnlane needs DPLL registers */
	disp2d->ops->enable(dev_priv, disp2d);

	/*
	 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
	 * Need to assert and de-assert PHY SB reset by gating the
	 * common lane power, then un-gating it.
	 * Simply ungating isn't enough to reset the PHY enough to get
	 * ports and lanes running.
	 */
	cmn->ops->disable(dev_priv, cmn);
}
/**
 * intel_power_domains_init_hw - initialize hardware power domain state
 * @dev_priv: i915 device instance
 * @resume: true when called from a resume code path
 *
 * This function initializes the hardware power domain state and enables all
 * power domains using intel_display_set_init_power().
 */
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
{
	struct drm_device *dev = &dev_priv->drm;
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	power_domains->initializing = true;

	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
		skl_display_core_init(dev_priv, resume);
	} else if (IS_BROXTON(dev)) {
		bxt_display_core_init(dev_priv, resume);
	} else if (IS_CHERRYVIEW(dev)) {
		mutex_lock(&power_domains->lock);
		chv_phy_control_init(dev_priv);
		mutex_unlock(&power_domains->lock);
	} else if (IS_VALLEYVIEW(dev)) {
		mutex_lock(&power_domains->lock);
		vlv_cmnlane_wa(dev_priv);
		mutex_unlock(&power_domains->lock);
	}

	/* For now, we need the power well to be always enabled. */
	intel_display_set_init_power(dev_priv, true);
	/* Disable power well support if the user asked for it. */
	if (!i915.disable_power_well)
		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
	intel_power_domains_sync_hw(dev_priv);
	power_domains->initializing = false;
}

/**
 * intel_power_domains_suspend - suspend power domain state
 * @dev_priv: i915 device instance
 *
 * This function prepares the hardware power domain state before entering
 * system suspend. It must be paired with intel_power_domains_init_hw().
 */
void intel_power_domains_suspend(struct drm_i915_private *dev_priv)
{
	/*
	 * Even if power well support was disabled we still want to disable
	 * power wells while we are system suspended.
	 */
	if (!i915.disable_power_well)
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
		skl_display_core_uninit(dev_priv);
	else if (IS_BROXTON(dev_priv))
		bxt_display_core_uninit(dev_priv);
}

/**
 * intel_runtime_pm_get - grab a runtime pm reference
 * @dev_priv: i915 device instance
 *
 * This function grabs a device-level runtime pm reference (mostly used for GEM
 * code to ensure the GTT or GT is on) and ensures that it is powered up.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 */
void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	struct device *kdev = &pdev->dev;

	pm_runtime_get_sync(kdev);

	atomic_inc(&dev_priv->pm.wakeref_count);
	assert_rpm_wakelock_held(dev_priv);
}
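/*
 * Illustrative sketch, not part of the original driver: GEM code that needs
 * the device awake, e.g. to touch the GTT, wraps its access in a runtime pm
 * get/put pair. The helper name is hypothetical.
 */
static void __maybe_unused example_gtt_access(struct drm_i915_private *dev_priv)
{
	intel_runtime_pm_get(dev_priv);

	/* ... the device is guaranteed to be powered up here ... */

	intel_runtime_pm_put(dev_priv);
}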
/**
 * intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device in use
 * @dev_priv: i915 device instance
 *
 * This function grabs a device-level runtime pm reference if the device is
 * already in use and ensures that it is powered up.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 */
bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	struct device *kdev = &pdev->dev;

	if (IS_ENABLED(CONFIG_PM)) {
		int ret = pm_runtime_get_if_in_use(kdev);

		/*
		 * If runtime PM is disabled by the RPM core we get an -EINVAL
		 * return value; we are not supposed to call this function in
		 * those cases, since the power state is undefined. At the
		 * moment this applies to the late/early system suspend/resume
		 * handlers.
		 */
		WARN_ON_ONCE(ret < 0);
		if (ret <= 0)
			return false;
	}

	atomic_inc(&dev_priv->pm.wakeref_count);
	assert_rpm_wakelock_held(dev_priv);

	return true;
}

/**
 * intel_runtime_pm_get_noresume - grab a runtime pm reference
 * @dev_priv: i915 device instance
 *
 * This function grabs a device-level runtime pm reference (mostly used for GEM
 * code to ensure the GTT or GT is on).
 *
 * It will _not_ power up the device but instead only check that it's powered
 * on. Therefore it is only valid to call this function from contexts where
 * the device is known to be powered up and where trying to power it up would
 * result in hilarity and deadlocks. That pretty much means only the system
 * suspend/resume code where this is used to grab runtime pm references for
 * delayed setup down in work items.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 */
void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	struct device *kdev = &pdev->dev;

	assert_rpm_wakelock_held(dev_priv);
	pm_runtime_get_noresume(kdev);

	atomic_inc(&dev_priv->pm.wakeref_count);
}

/**
 * intel_runtime_pm_put - release a runtime pm reference
 * @dev_priv: i915 device instance
 *
 * This function drops the device-level runtime pm reference obtained by
 * intel_runtime_pm_get() and might power down the corresponding
 * hardware block right away if this is the last reference.
 */
void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	struct device *kdev = &pdev->dev;

	assert_rpm_wakelock_held(dev_priv);
	if (atomic_dec_and_test(&dev_priv->pm.wakeref_count))
		atomic_inc(&dev_priv->pm.atomic_seq);

	pm_runtime_mark_last_busy(kdev);
	pm_runtime_put_autosuspend(kdev);
}

/**
 * intel_runtime_pm_enable - enable runtime pm
 * @dev_priv: i915 device instance
 *
 * This function enables runtime pm at the end of the driver load sequence.
 *
 * Note that this function does currently not enable runtime pm for the
 * subordinate display power domains. That is only done on the first modeset
 * using intel_display_set_init_power().
 */
void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	struct drm_device *dev = &dev_priv->drm;
	struct device *kdev = &pdev->dev;

	pm_runtime_set_autosuspend_delay(kdev, 10000); /* 10s */
	pm_runtime_mark_last_busy(kdev);

	/*
	 * Take a permanent reference to disable the RPM functionality and drop
	 * it only when unloading the driver. Use the low level get/put helpers,
	 * so the driver's own RPM reference tracking asserts also work on
	 * platforms without RPM support.
	 */
	if (!HAS_RUNTIME_PM(dev)) {
		pm_runtime_dont_use_autosuspend(kdev);
		pm_runtime_get_sync(kdev);
	} else {
		pm_runtime_use_autosuspend(kdev);
	}
	/*
	 * The core calls the driver load handler with an RPM reference held.
	 * We drop that here and will reacquire it during unloading in
	 * intel_power_domains_fini().
	 */
	pm_runtime_put_autosuspend(kdev);
}