/*
 * Copyright © 2012-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *    Daniel Vetter <daniel.vetter@ffwll.ch>
 *
 */

#include <linux/pm_runtime.h>
#include <linux/vgaarb.h>

#include "i915_drv.h"
#include "intel_drv.h"

/**
 * DOC: runtime pm
 *
 * The i915 driver supports dynamic enabling and disabling of entire hardware
 * blocks at runtime. This is especially important on the display side where
 * software is supposed to control many power gates manually on recent hardware,
 * since on the GT side a lot of the power management is done by the hardware.
 * But even there some manual control at the device level is required.
 *
 * Since i915 supports a diverse set of platforms with a unified codebase and
 * hardware engineers just love to shuffle functionality around between power
 * domains there's a sizeable amount of indirection required. This file provides
 * generic functions to the driver for grabbing and releasing references for
 * abstract power domains. It then maps those to the actual power wells
 * present for a given platform.
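 *
 * As an illustrative sketch (not tied to any particular caller), code that
 * needs a display hardware block powered up brackets its register accesses
 * with a get/put pair on the relevant abstract domain, e.g.:
 *
 *	intel_display_power_get(dev_priv, POWER_DOMAIN_AUX_A);
 *	... access the AUX A registers ...
 *	intel_display_power_put(dev_priv, POWER_DOMAIN_AUX_A);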
 */

#define for_each_power_well(i, power_well, domain_mask, power_domains)	\
	for (i = 0;							\
	     i < (power_domains)->power_well_count &&			\
		 ((power_well) = &(power_domains)->power_wells[i]);	\
	     i++)							\
		for_each_if ((power_well)->domains & (domain_mask))

#define for_each_power_well_rev(i, power_well, domain_mask, power_domains) \
	for (i = (power_domains)->power_well_count - 1;			 \
	     i >= 0 && ((power_well) = &(power_domains)->power_wells[i]);\
	     i--)							 \
		for_each_if ((power_well)->domains & (domain_mask))

bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
					 int power_well_id);

const char *
intel_display_power_domain_str(enum intel_display_power_domain domain)
{
	switch (domain) {
	case POWER_DOMAIN_PIPE_A:
		return "PIPE_A";
	case POWER_DOMAIN_PIPE_B:
		return "PIPE_B";
	case POWER_DOMAIN_PIPE_C:
		return "PIPE_C";
	case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
		return "PIPE_A_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
		return "PIPE_B_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
		return "PIPE_C_PANEL_FITTER";
	case POWER_DOMAIN_TRANSCODER_A:
		return "TRANSCODER_A";
	case POWER_DOMAIN_TRANSCODER_B:
		return "TRANSCODER_B";
	case POWER_DOMAIN_TRANSCODER_C:
		return "TRANSCODER_C";
	case POWER_DOMAIN_TRANSCODER_EDP:
		return "TRANSCODER_EDP";
	case POWER_DOMAIN_PORT_DDI_A_LANES:
		return "PORT_DDI_A_LANES";
	case POWER_DOMAIN_PORT_DDI_B_LANES:
		return "PORT_DDI_B_LANES";
	case POWER_DOMAIN_PORT_DDI_C_LANES:
		return "PORT_DDI_C_LANES";
	case POWER_DOMAIN_PORT_DDI_D_LANES:
		return "PORT_DDI_D_LANES";
	case POWER_DOMAIN_PORT_DDI_E_LANES:
		return "PORT_DDI_E_LANES";
	case POWER_DOMAIN_PORT_DSI:
		return "PORT_DSI";
	case POWER_DOMAIN_PORT_CRT:
		return "PORT_CRT";
	case POWER_DOMAIN_PORT_OTHER:
		return "PORT_OTHER";
	case POWER_DOMAIN_VGA:
		return "VGA";
	case POWER_DOMAIN_AUDIO:
		return "AUDIO";
	case POWER_DOMAIN_PLLS:
		return "PLLS";
	case POWER_DOMAIN_AUX_A:
		return "AUX_A";
	case POWER_DOMAIN_AUX_B:
		return "AUX_B";
	case POWER_DOMAIN_AUX_C:
		return "AUX_C";
	case POWER_DOMAIN_AUX_D:
		return "AUX_D";
	case POWER_DOMAIN_GMBUS:
		return "GMBUS";
	case POWER_DOMAIN_INIT:
		return "INIT";
	case POWER_DOMAIN_MODESET:
		return "MODESET";
	default:
		MISSING_CASE(domain);
		return "?";
	}
}

static void intel_power_well_enable(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well)
{
	DRM_DEBUG_KMS("enabling %s\n", power_well->name);
	power_well->ops->enable(dev_priv, power_well);
	power_well->hw_enabled = true;
}

static void intel_power_well_disable(struct drm_i915_private *dev_priv,
				     struct i915_power_well *power_well)
{
	DRM_DEBUG_KMS("disabling %s\n", power_well->name);
	power_well->hw_enabled = false;
	power_well->ops->disable(dev_priv, power_well);
}

/*
 * We should only use the power well if we explicitly asked the hardware to
 * enable it, so check if it's enabled and also check if we've requested it to
 * be enabled.
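 *
 * (As the code below shows, HSW_PWR_WELL_DRIVER carries both the driver's
 * request bit and the hardware's state bit, so a single read compared
 * against the two bits OR'ed together covers both conditions.)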
 */
static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	return I915_READ(HSW_PWR_WELL_DRIVER) ==
		     (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
}

/**
 * __intel_display_power_is_enabled - unlocked check for a power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to check
 *
 * This is the unlocked version of intel_display_power_is_enabled() and should
 * only be used from error capture and recovery code where deadlocks are
 * possible.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
				      enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	bool is_enabled;
	int i;

	if (dev_priv->pm.suspended)
		return false;

	power_domains = &dev_priv->power_domains;

	is_enabled = true;

	for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
		if (power_well->always_on)
			continue;

		if (!power_well->hw_enabled) {
			is_enabled = false;
			break;
		}
	}

	return is_enabled;
}

/**
 * intel_display_power_is_enabled - check for a power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to check
 *
 * This function can be used to check the hw power domain state. It is mostly
 * used in hardware state readout functions. Everywhere else code should rely
 * upon explicit power domain reference counting to ensure that the hardware
 * block is powered up before accessing it.
 *
 * Callers must hold the relevant modesetting locks to ensure that concurrent
 * threads can't disable the power well while the caller tries to read a few
 * registers.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
				    enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	bool ret;

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);
	ret = __intel_display_power_is_enabled(dev_priv, domain);
	mutex_unlock(&power_domains->lock);

	return ret;
}

/**
 * intel_display_set_init_power - set the initial power domain state
 * @dev_priv: i915 device instance
 * @enable: whether to enable or disable the initial power domain state
 *
 * For simplicity our driver load/unload and system suspend/resume code assumes
 * that all power domains are always enabled. This function controls the state
 * of this little hack. While the initial power domain state is enabled runtime
 * pm is effectively disabled.
 */
void intel_display_set_init_power(struct drm_i915_private *dev_priv,
				  bool enable)
{
	if (dev_priv->power_domains.init_power_on == enable)
		return;

	if (enable)
		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
	else
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

	dev_priv->power_domains.init_power_on = enable;
}

/*
 * Starting with Haswell, we have a "Power Down Well" that can be turned off
 * when not needed anymore. We have 4 registers that can request the power well
 * to be enabled, and it will only be disabled if none of the registers is
 * requesting it to be enabled.
 */
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	/*
	 * After we re-enable the power well, if we touch VGA register 0x3d5
	 * we'll get unclaimed register interrupts. This stops after we write
	 * anything to the VGA MSR register. The vgacon module uses this
	 * register all the time, so if we unbind our driver and, as a
	 * consequence, bind vgacon, we'll get stuck in an infinite loop at
	 * console_unlock(). So here we touch the VGA MSR register, making
	 * sure vgacon can keep working normally without triggering interrupts
	 * and error messages.
	 */
	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
	outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
	vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);

	if (IS_BROADWELL(dev))
		gen8_irq_power_well_post_enable(dev_priv,
						1 << PIPE_C | 1 << PIPE_B);
}

static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
				       struct i915_power_well *power_well)
{
	struct drm_device *dev = dev_priv->dev;

	/*
	 * After we re-enable the power well, if we touch VGA register 0x3d5
	 * we'll get unclaimed register interrupts. This stops after we write
	 * anything to the VGA MSR register. The vgacon module uses this
	 * register all the time, so if we unbind our driver and, as a
	 * consequence, bind vgacon, we'll get stuck in an infinite loop at
	 * console_unlock(). So here we touch the VGA MSR register, making
	 * sure vgacon can keep working normally without triggering interrupts
	 * and error messages.
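	 *
	 * (A note inferred from the domain lists below: only power well 2
	 * feeds VGA and pipes B/C on SKL, which is why this workaround and
	 * the pipe B/C IRQ re-init are gated on SKL_DISP_PW_2.)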
	 */
	if (power_well->data == SKL_DISP_PW_2) {
		vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
		outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
		vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);

		gen8_irq_power_well_post_enable(dev_priv,
						1 << PIPE_C | 1 << PIPE_B);
	}
}

static void hsw_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	bool is_enabled, enable_requested;
	uint32_t tmp;

	tmp = I915_READ(HSW_PWR_WELL_DRIVER);
	is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
	enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;

	if (enable) {
		if (!enable_requested)
			I915_WRITE(HSW_PWR_WELL_DRIVER,
				   HSW_PWR_WELL_ENABLE_REQUEST);

		if (!is_enabled) {
			DRM_DEBUG_KMS("Enabling power well\n");
			if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
				      HSW_PWR_WELL_STATE_ENABLED), 20))
				DRM_ERROR("Timeout enabling power well\n");
			hsw_power_well_post_enable(dev_priv);
		}

	} else {
		if (enable_requested) {
			I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
			POSTING_READ(HSW_PWR_WELL_DRIVER);
			DRM_DEBUG_KMS("Requesting to disable the power well\n");
		}
	}
}

#define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT(POWER_DOMAIN_PIPE_B) |			\
	BIT(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT(POWER_DOMAIN_PIPE_C) |			\
	BIT(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_E_LANES) |		\
	BIT(POWER_DOMAIN_AUX_B) |			\
	BIT(POWER_DOMAIN_AUX_C) |			\
	BIT(POWER_DOMAIN_AUX_D) |			\
	BIT(POWER_DOMAIN_AUDIO) |			\
	BIT(POWER_DOMAIN_VGA) |				\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_A_E_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_A_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_E_LANES) |		\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_B_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_C_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_D_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT(POWER_DOMAIN_MODESET) |			\
	BIT(POWER_DOMAIN_AUX_A) |			\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_ALWAYS_ON_POWER_DOMAINS (		\
	(POWER_DOMAIN_MASK & ~(				\
	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	SKL_DISPLAY_DC_OFF_POWER_DOMAINS)) |		\
	BIT(POWER_DOMAIN_INIT))

#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT(POWER_DOMAIN_PIPE_B) |			\
	BIT(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT(POWER_DOMAIN_PIPE_C) |			\
	BIT(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT(POWER_DOMAIN_AUX_B) |			\
	BIT(POWER_DOMAIN_AUX_C) |			\
	BIT(POWER_DOMAIN_AUDIO) |			\
	BIT(POWER_DOMAIN_VGA) |				\
	BIT(POWER_DOMAIN_GMBUS) |			\
	BIT(POWER_DOMAIN_INIT))
#define BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS (		\
	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT(POWER_DOMAIN_PIPE_A) |			\
	BIT(POWER_DOMAIN_TRANSCODER_EDP) |		\
	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PORT_DDI_A_LANES) |		\
	BIT(POWER_DOMAIN_AUX_A) |			\
	BIT(POWER_DOMAIN_PLLS) |			\
	BIT(POWER_DOMAIN_INIT))
#define BXT_DISPLAY_DC_OFF_POWER_DOMAINS (		\
	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT(POWER_DOMAIN_MODESET) |			\
	BIT(POWER_DOMAIN_AUX_A) |			\
	BIT(POWER_DOMAIN_INIT))
#define BXT_DISPLAY_ALWAYS_ON_POWER_DOMAINS (		\
	(POWER_DOMAIN_MASK & ~(BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS |	\
	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS)) |	\
	BIT(POWER_DOMAIN_INIT))

static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	WARN(!IS_BROXTON(dev), "Platform doesn't support DC9.\n");
	WARN((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
		"DC9 already programmed to be enabled.\n");
	WARN(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
		"DC5 still not disabled to enable DC9.\n");
	WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on.\n");
	WARN(intel_irqs_enabled(dev_priv), "Interrupts not disabled yet.\n");

	/*
	 * TODO: check for the following to verify the conditions to enter DC9
	 * state are satisfied:
	 * 1] Check relevant display engine registers to verify if mode set
	 *    disable sequence was followed.
	 * 2] Check if display uninitialize sequence is initialized.
	 */
}

static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
{
	WARN(intel_irqs_enabled(dev_priv), "Interrupts not disabled yet.\n");
	WARN(!(I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
		"DC9 already programmed to be disabled.\n");
	WARN(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
		"DC5 still not disabled.\n");

	/*
	 * TODO: check for the following to verify DC9 state was indeed
	 * entered before programming to disable it:
	 * 1] Check relevant display engine registers to verify if mode
	 *    set disable sequence was followed.
	 * 2] Check if display uninitialize sequence is initialized.
	 */
}

static void gen9_set_dc_state_debugmask_memory_up(
			struct drm_i915_private *dev_priv)
{
	uint32_t val;

	/* The below bit doesn't need to be cleared ever afterwards */
	val = I915_READ(DC_STATE_DEBUG);
	if (!(val & DC_STATE_DEBUG_MASK_MEMORY_UP)) {
		val |= DC_STATE_DEBUG_MASK_MEMORY_UP;
		I915_WRITE(DC_STATE_DEBUG, val);
		POSTING_READ(DC_STATE_DEBUG);
	}
}

static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state)
{
	uint32_t val;
	uint32_t mask;

	mask = DC_STATE_EN_UPTO_DC5;
	if (IS_BROXTON(dev_priv))
		mask |= DC_STATE_EN_DC9;
	else
		mask |= DC_STATE_EN_UPTO_DC6;

	WARN_ON_ONCE(state & ~mask);

	if (i915.enable_dc == 0)
		state = DC_STATE_DISABLE;
	else if (i915.enable_dc == 1 && state > DC_STATE_EN_UPTO_DC5)
		state = DC_STATE_EN_UPTO_DC5;

	if (state & DC_STATE_EN_UPTO_DC5_DC6_MASK)
		gen9_set_dc_state_debugmask_memory_up(dev_priv);

	val = I915_READ(DC_STATE_EN);
	DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n",
		      val & mask, state);
	val &= ~mask;
	val |= state;
	I915_WRITE(DC_STATE_EN, val);
	POSTING_READ(DC_STATE_EN);
}

void bxt_enable_dc9(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc9(dev_priv);

	DRM_DEBUG_KMS("Enabling DC9\n");

	gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
}

void bxt_disable_dc9(struct drm_i915_private *dev_priv)
{
	assert_can_disable_dc9(dev_priv);

	DRM_DEBUG_KMS("Disabling DC9\n");

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
}

static void assert_csr_loaded(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(!I915_READ(CSR_PROGRAM(0)),
		  "CSR program storage start is NULL\n");
	WARN_ONCE(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n");
	WARN_ONCE(!I915_READ(CSR_HTP_SKL), "CSR HTP Not fine\n");
}

static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
					SKL_DISP_PW_2);

	WARN_ONCE(!IS_SKYLAKE(dev) && !IS_KABYLAKE(dev),
		  "Platform doesn't support DC5.\n");
	WARN_ONCE(!HAS_RUNTIME_PM(dev), "Runtime PM not enabled.\n");
	WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n");

	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5),
		  "DC5 already programmed to be enabled.\n");
	assert_rpm_wakelock_held(dev_priv);

	assert_csr_loaded(dev_priv);
}

static void assert_can_disable_dc5(struct drm_i915_private *dev_priv)
{
	/*
	 * During initialization, the firmware may not be loaded yet.
	 * We still want to make sure that the DC enabling flag is cleared.
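	 *
	 * (So only the rpm wakelock assert below is skipped while
	 * power_domains.initializing is set; the DC_STATE_EN bits are still
	 * cleared by the gen9_set_dc_state() call in the disable path.)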
	 */
	if (dev_priv->power_domains.initializing)
		return;

	assert_rpm_wakelock_held(dev_priv);
}

static void gen9_enable_dc5(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc5(dev_priv);

	DRM_DEBUG_KMS("Enabling DC5\n");

	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
}

static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	WARN_ONCE(!IS_SKYLAKE(dev) && !IS_KABYLAKE(dev),
		  "Platform doesn't support DC6.\n");
	WARN_ONCE(!HAS_RUNTIME_PM(dev), "Runtime PM not enabled.\n");
	WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
		  "Backlight is not disabled.\n");
	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
		  "DC6 already programmed to be enabled.\n");

	assert_csr_loaded(dev_priv);
}

static void assert_can_disable_dc6(struct drm_i915_private *dev_priv)
{
	/*
	 * During initialization, the firmware may not be loaded yet.
	 * We still want to make sure that the DC enabling flag is cleared.
	 */
	if (dev_priv->power_domains.initializing)
		return;

	WARN_ONCE(!(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
		  "DC6 already programmed to be disabled.\n");
}

static void gen9_disable_dc5_dc6(struct drm_i915_private *dev_priv)
{
	assert_can_disable_dc5(dev_priv);

	if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
	    i915.enable_dc != 0 && i915.enable_dc != 1)
		assert_can_disable_dc6(dev_priv);

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
}

void skl_enable_dc6(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc6(dev_priv);

	DRM_DEBUG_KMS("Enabling DC6\n");

	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
}

void skl_disable_dc6(struct drm_i915_private *dev_priv)
{
	assert_can_disable_dc6(dev_priv);

	DRM_DEBUG_KMS("Disabling DC6\n");

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
}

static void skl_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	uint32_t tmp, fuse_status;
	uint32_t req_mask, state_mask;
	bool is_enabled, enable_requested, check_fuse_status = false;

	tmp = I915_READ(HSW_PWR_WELL_DRIVER);
	fuse_status = I915_READ(SKL_FUSE_STATUS);

	switch (power_well->data) {
	case SKL_DISP_PW_1:
		if (wait_for((I915_READ(SKL_FUSE_STATUS) &
			SKL_FUSE_PG0_DIST_STATUS), 1)) {
			DRM_ERROR("PG0 not enabled\n");
			return;
		}
		break;
	case SKL_DISP_PW_2:
		if (!(fuse_status & SKL_FUSE_PG1_DIST_STATUS)) {
			DRM_ERROR("PG1 in disabled state\n");
			return;
		}
		break;
	case SKL_DISP_PW_DDI_A_E:
	case SKL_DISP_PW_DDI_B:
	case SKL_DISP_PW_DDI_C:
	case SKL_DISP_PW_DDI_D:
	case SKL_DISP_PW_MISC_IO:
		break;
	default:
		WARN(1, "Unknown power well %lu\n", power_well->data);
		return;
	}

	req_mask = SKL_POWER_WELL_REQ(power_well->data);
	enable_requested = tmp & req_mask;
	state_mask = SKL_POWER_WELL_STATE(power_well->data);
	is_enabled = tmp & state_mask;

	if (enable) {
		if (!enable_requested) {
			WARN((tmp & state_mask) &&
				!I915_READ(HSW_PWR_WELL_BIOS),
				"Invalid for power well status to be enabled, unless done by the BIOS, when request is to disable!\n");
			I915_WRITE(HSW_PWR_WELL_DRIVER, tmp | req_mask);
		}

		if (!is_enabled) {
			DRM_DEBUG_KMS("Enabling %s\n", power_well->name);
			if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
				state_mask), 1))
				DRM_ERROR("%s enable timeout\n",
					power_well->name);
			check_fuse_status = true;
		}
	} else {
		if (enable_requested) {
			I915_WRITE(HSW_PWR_WELL_DRIVER, tmp & ~req_mask);
			POSTING_READ(HSW_PWR_WELL_DRIVER);
			DRM_DEBUG_KMS("Disabling %s\n", power_well->name);
		}
	}

	if (check_fuse_status) {
		if (power_well->data == SKL_DISP_PW_1) {
			if (wait_for((I915_READ(SKL_FUSE_STATUS) &
				SKL_FUSE_PG1_DIST_STATUS), 1))
				DRM_ERROR("PG1 distributing status timeout\n");
		} else if (power_well->data == SKL_DISP_PW_2) {
			if (wait_for((I915_READ(SKL_FUSE_STATUS) &
				SKL_FUSE_PG2_DIST_STATUS), 1))
				DRM_ERROR("PG2 distributing status timeout\n");
		}
	}

	if (enable && !is_enabled)
		skl_power_well_post_enable(dev_priv, power_well);
}

static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, power_well->count > 0);

	/*
	 * We're taking over the BIOS, so clear any requests made by it since
	 * the driver is in charge now.
	 */
	if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
		I915_WRITE(HSW_PWR_WELL_BIOS, 0);
}

static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, true);
}

static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, false);
}

static bool skl_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	uint32_t mask = SKL_POWER_WELL_REQ(power_well->data) |
		SKL_POWER_WELL_STATE(power_well->data);

	return (I915_READ(HSW_PWR_WELL_DRIVER) & mask) == mask;
}

static void skl_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	skl_set_power_well(dev_priv, power_well, power_well->count > 0);

	/* Clear any request made by BIOS as driver is taking over */
	I915_WRITE(HSW_PWR_WELL_BIOS, 0);
}

static void skl_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	skl_set_power_well(dev_priv, power_well, true);
}

static void skl_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	skl_set_power_well(dev_priv, power_well, false);
}

static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	return (I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0;
}

static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	gen9_disable_dc5_dc6(dev_priv);
}

static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
	    i915.enable_dc != 0 && i915.enable_dc != 1)
		skl_enable_dc6(dev_priv);
	else
		gen9_enable_dc5(dev_priv);
}

static void gen9_dc_off_power_well_sync_hw(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	if (power_well->count > 0) {
		gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
	} else {
		if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
		    i915.enable_dc != 0 &&
		    i915.enable_dc != 1)
			gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
		else
			gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
	}
}

static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
}

static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
					      struct i915_power_well *power_well)
{
	return true;
}

static void vlv_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	enum punit_power_well power_well_id = power_well->data;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(power_well_id);
	state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
			 PUNIT_PWRGT_PWR_GATE(power_well_id);

	mutex_lock(&dev_priv->rps.hw_lock);

#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)

	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
	ctrl &= ~mask;
	ctrl |= state;
	vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);

	if (wait_for(COND, 100))
		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
			  state,
			  vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));

#undef COND

out:
	mutex_unlock(&dev_priv->rps.hw_lock);
}

static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, power_well->count > 0);
}

static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, true);
}

static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, false);
}

static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	int power_well_id = power_well->data;
	bool enabled = false;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(power_well_id);
	ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);

	mutex_lock(&dev_priv->rps.hw_lock);

	state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) &&
		state != PUNIT_PWRGT_PWR_GATE(power_well_id));
	if (state == ctrl)
		enabled = true;

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
	WARN_ON(ctrl != state);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return enabled;
}

static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;

	/*
	 * Enable the CRI clock source so we can get at the
	 * display and the reference clock for VGA
	 * hotplug / manual detection. Supposedly DSI also
	 * needs the ref clock up and running.
	 *
	 * CHV DPLL B/C have some issues if VGA mode is enabled.
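	 * (Hence the loop below keeps DPLL_VGA_MODE_DIS set on every pipe.)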
	 */
	for_each_pipe(dev_priv->dev, pipe) {
		u32 val = I915_READ(DPLL(pipe));

		val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
		if (pipe != PIPE_A)
			val |= DPLL_INTEGRATED_CRI_CLK_VLV;

		I915_WRITE(DPLL(pipe), val);
	}

	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_enable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * During driver initialization/resume we can avoid restoring the
	 * part of the HW/SW state that will be re-initialized explicitly
	 * anyway.
	 */
	if (dev_priv->power_domains.initializing)
		return;

	intel_hpd_init(dev_priv);

	i915_redisable_vga_power_on(dev_priv->dev);
}

static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_disable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	vlv_power_sequencer_reset(dev_priv);
}

static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);

	vlv_set_power_well(dev_priv, power_well, true);

	vlv_display_power_well_init(dev_priv);
}

static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);

	vlv_display_power_well_deinit(dev_priv);

	vlv_set_power_well(dev_priv, power_well, false);
}

static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);

	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */

	vlv_set_power_well(dev_priv, power_well, true);

	/*
	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
	 * 6.	De-assert cmn_reset/side_reset. Same as VLV X0.
	 *	a.	GUnit 0x2110 bit[0] set to 1 (def 0)
	 *	b.	The other bits such as sfr settings / modesel may all
	 *		be set to 0.
	 *
	 * This should only be done on init and resume from S3 with
	 * both PLLs disabled, or we risk losing DPIO and PLL
	 * synchronization.
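	 *
	 * (The "GUnit 0x2110 bit[0]" above is presumably the DPIO_CMNRST
	 * bit set in the DPIO_CTL write just below.)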
	 */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
}

static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum pipe pipe;

	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);

	for_each_pipe(dev_priv, pipe)
		assert_pll_disabled(dev_priv, pipe);

	/* Assert common reset */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);

	vlv_set_power_well(dev_priv, power_well, false);
}

#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)

static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
						 int power_well_id)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	int i;

	for (i = 0; i < power_domains->power_well_count; i++) {
		struct i915_power_well *power_well;

		power_well = &power_domains->power_wells[i];
		if (power_well->data == power_well_id)
			return power_well;
	}

	return NULL;
}

#define BITS_SET(val, bits) (((val) & (bits)) == (bits))

static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D);
	u32 phy_control = dev_priv->chv_phy_control;
	u32 phy_status = 0;
	u32 phy_status_mask = 0xffffffff;
	u32 tmp;

	/*
	 * The BIOS can leave the PHY in some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
	if (!dev_priv->chv_phy_assert[DPIO_PHY0])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
				     PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));

	if (!dev_priv->chv_phy_assert[DPIO_PHY1])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));

	if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY0);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);

		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);

		/* CL1 is on whenever anything is on in either channel */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);

		/*
		 * The DPLLB check accounts for the pipe B + port A usage
		 * with CL2 powered up but all the lanes in the second channel
		 * powered down.
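		 *
		 * (A note on the override masks used below: nibble bits 0x3
		 * select lanes 0-1, i.e. spline 0, and bits 0xc select
		 * lanes 2-3, i.e. spline 1, matching the
		 * PHY_STATUS_SPLINE_LDO checks they gate.)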
		 */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
		    (I915_READ(DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
	}

	if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY1);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
	}

	phy_status &= phy_status_mask;

	/*
	 * The PHY may be busy with some initial calibration and whatnot,
	 * so the power state can take a while to actually change.
	 */
	if (wait_for((tmp = I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask) == phy_status, 10))
		WARN(phy_status != tmp,
		     "Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
		     tmp, phy_status, dev_priv->chv_phy_control);
}

#undef BITS_SET

static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	enum dpio_phy phy;
	enum pipe pipe;
	uint32_t tmp;

	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
		     power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);

	if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		pipe = PIPE_A;
		phy = DPIO_PHY0;
	} else {
		pipe = PIPE_C;
		phy = DPIO_PHY1;
	}

	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
	vlv_set_power_well(dev_priv, power_well, true);

	/* Poll for phypwrgood signal */
	if (wait_for(I915_READ(DISPLAY_PHY_STATUS) & PHY_POWERGOOD(phy), 1))
		DRM_ERROR("Display PHY %d is not powered up\n", phy);

	mutex_lock(&dev_priv->sb_lock);

	/* Enable dynamic power down */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
	tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
		DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);

	if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
		tmp |= DPIO_DYNPWRDOWNEN_CH1;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
	} else {
		/*
		 * Force the non-existing CL2 off. BXT does this
		 * too, so maybe it saves some power even though
		 * CL2 doesn't exist?
		 */
		tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
		tmp |= DPIO_CL2_LDOFUSE_PWRENB;
		vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
	}

	mutex_unlock(&dev_priv->sb_lock);

	dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		      phy, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);
}

static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum dpio_phy phy;

	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
		     power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);

	if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		phy = DPIO_PHY0;
		assert_pll_disabled(dev_priv, PIPE_A);
		assert_pll_disabled(dev_priv, PIPE_B);
	} else {
		phy = DPIO_PHY1;
		assert_pll_disabled(dev_priv, PIPE_C);
	}

	dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	vlv_set_power_well(dev_priv, power_well, false);

	DRM_DEBUG_KMS("Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		      phy, dev_priv->chv_phy_control);

	/* PHY is fully reset now, so we can enable the PHY state asserts */
	dev_priv->chv_phy_assert[phy] = true;

	assert_chv_phy_status(dev_priv);
}

static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
				     enum dpio_channel ch, bool override, unsigned int mask)
{
	enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
	u32 reg, val, expected, actual;

	/*
	 * The BIOS can leave the PHY in some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
	if (!dev_priv->chv_phy_assert[phy])
		return;

	if (ch == DPIO_CH0)
		reg = _CHV_CMN_DW0_CH0;
	else
		reg = _CHV_CMN_DW6_CH1;

	mutex_lock(&dev_priv->sb_lock);
	val = vlv_dpio_read(dev_priv, pipe, reg);
	mutex_unlock(&dev_priv->sb_lock);

	/*
	 * This assumes !override is only used when the port is disabled.
	 * All lanes should power down even without the override when
	 * the port is disabled.
	 */
	if (!override || mask == 0xf) {
		expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
		/*
		 * If CH1 common lane is not active anymore
		 * (eg. for pipe B DPLL) the entire channel will
		 * shut down, which causes the common lane registers
		 * to read as 0. That means we can't actually check
		 * the lane power down status bits, but as the entire
		 * register reads as 0 it's a good indication that the
		 * channel is indeed entirely powered down.
		 */
		if (ch == DPIO_CH1 && val == 0)
			expected = 0;
	} else if (mask != 0x0) {
		expected = DPIO_ANYDL_POWERDOWN;
	} else {
		expected = 0;
	}

	if (ch == DPIO_CH0)
		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
	else
		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
	actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;

	WARN(actual != expected,
	     "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
	     !!(actual & DPIO_ALLDL_POWERDOWN), !!(actual & DPIO_ANYDL_POWERDOWN),
	     !!(expected & DPIO_ALLDL_POWERDOWN), !!(expected & DPIO_ANYDL_POWERDOWN),
	     reg, val);
}

bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
			  enum dpio_channel ch, bool override)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	bool was_override;

	mutex_lock(&power_domains->lock);

	was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	if (override == was_override)
		goto out;

	if (override)
		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
	else
		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
		      phy, ch, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);

out:
	mutex_unlock(&power_domains->lock);

	return was_override;
}

void chv_phy_powergate_lanes(struct intel_encoder *encoder,
			     bool override, unsigned int mask)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(&encoder->base));
	enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));

	mutex_lock(&power_domains->lock);

	dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
	dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);

	if (override)
		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
	else
		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
		      phy, ch, mask, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);

	assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);

	mutex_unlock(&power_domains->lock);
}

static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	enum pipe pipe = power_well->data;
	bool enabled;
	u32 state, ctrl;

	mutex_lock(&dev_priv->rps.hw_lock);

	state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe);
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
	enabled = state == DP_SSS_PWR_ON(pipe);

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe);
	WARN_ON(ctrl << 16 != state);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return enabled;
}

static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well,
				    bool enable)
{
	enum pipe pipe = power_well->data;
	u32 state;
	u32 ctrl;

	state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);

	mutex_lock(&dev_priv->rps.hw_lock);

#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state)

	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	ctrl &= ~DP_SSC_MASK(pipe);
	ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl);

	if (wait_for(COND, 100))
		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
			  state,
			  vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ));

#undef COND

out:
	mutex_unlock(&dev_priv->rps.hw_lock);
}

static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PIPE_A);

	chv_set_pipe_power_well(dev_priv, power_well, power_well->count > 0);
}

static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
				       struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PIPE_A);

	chv_set_pipe_power_well(dev_priv, power_well, true);

	vlv_display_power_well_init(dev_priv);
}

static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PIPE_A);

	vlv_display_power_well_deinit(dev_priv);

	chv_set_pipe_power_well(dev_priv, power_well, false);
}

/**
 * intel_display_power_get - grab a power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function grabs a power domain reference for @domain and ensures that the
 * power domain and all its parents are powered up. Therefore users should only
 * grab a reference to the innermost power domain they need.
 *
 * Any power domain reference obtained by this function must have a symmetric
 * call to intel_display_power_put() to release the reference again.
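 *
 * Note that this also takes a runtime pm reference via
 * intel_runtime_pm_get(), so the device itself is kept awake for as long as
 * the domain reference is held; intel_display_power_put() drops it again.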
 */
void intel_display_power_get(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	int i;

	intel_runtime_pm_get(dev_priv);

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);

	for_each_power_well(i, power_well, BIT(domain), power_domains) {
		if (!power_well->count++)
			intel_power_well_enable(dev_priv, power_well);
	}

	power_domains->domain_use_count[domain]++;

	mutex_unlock(&power_domains->lock);
}

/**
 * intel_display_power_put - release a power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function drops the power domain reference obtained by
 * intel_display_power_get() and might power down the corresponding hardware
 * block right away if this is the last reference.
 */
void intel_display_power_put(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	int i;

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);

	WARN(!power_domains->domain_use_count[domain],
	     "Use count on domain %s is already zero\n",
	     intel_display_power_domain_str(domain));
	power_domains->domain_use_count[domain]--;

	for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
		WARN(!power_well->count,
		     "Use count on power well %s is already zero",
		     power_well->name);

		if (!--power_well->count)
			intel_power_well_disable(dev_priv, power_well);
	}

	mutex_unlock(&power_domains->lock);

	intel_runtime_pm_put(dev_priv);
}

#define HSW_ALWAYS_ON_POWER_DOMAINS (			\
	BIT(POWER_DOMAIN_PIPE_A) |			\
	BIT(POWER_DOMAIN_TRANSCODER_EDP) |		\
	BIT(POWER_DOMAIN_PORT_DDI_A_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
	BIT(POWER_DOMAIN_PORT_CRT) |			\
	BIT(POWER_DOMAIN_PLLS) |			\
	BIT(POWER_DOMAIN_AUX_A) |			\
	BIT(POWER_DOMAIN_AUX_B) |			\
	BIT(POWER_DOMAIN_AUX_C) |			\
	BIT(POWER_DOMAIN_AUX_D) |			\
	BIT(POWER_DOMAIN_GMBUS) |			\
	BIT(POWER_DOMAIN_INIT))
#define HSW_DISPLAY_POWER_DOMAINS (			\
	(POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) |	\
	BIT(POWER_DOMAIN_INIT))

#define BDW_ALWAYS_ON_POWER_DOMAINS (			\
	HSW_ALWAYS_ON_POWER_DOMAINS |			\
	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER))
#define BDW_DISPLAY_POWER_DOMAINS (			\
	(POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS) |	\
	BIT(POWER_DOMAIN_INIT))

#define VLV_ALWAYS_ON_POWER_DOMAINS	BIT(POWER_DOMAIN_INIT)
#define VLV_DISPLAY_POWER_DOMAINS	POWER_DOMAIN_MASK

#define VLV_DPIO_CMN_BC_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT(POWER_DOMAIN_PORT_CRT) |		\
	BIT(POWER_DOMAIN_AUX_B) |		\
	BIT(POWER_DOMAIN_AUX_C) |		\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT(POWER_DOMAIN_AUX_B) |		\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT(POWER_DOMAIN_AUX_B) |		\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT(POWER_DOMAIN_AUX_C) |		\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT(POWER_DOMAIN_AUX_C) |		\
	BIT(POWER_DOMAIN_INIT))

#define CHV_DPIO_CMN_BC_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT(POWER_DOMAIN_AUX_B) |		\
	BIT(POWER_DOMAIN_AUX_C) |		\
	BIT(POWER_DOMAIN_INIT))

#define CHV_DPIO_CMN_D_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT(POWER_DOMAIN_AUX_D) |		\
	BIT(POWER_DOMAIN_INIT))

static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
	.sync_hw = i9xx_always_on_power_well_noop,
	.enable = i9xx_always_on_power_well_noop,
	.disable = i9xx_always_on_power_well_noop,
	.is_enabled = i9xx_always_on_power_well_enabled,
};

static const struct i915_power_well_ops chv_pipe_power_well_ops = {
	.sync_hw = chv_pipe_power_well_sync_hw,
	.enable = chv_pipe_power_well_enable,
	.disable = chv_pipe_power_well_disable,
	.is_enabled = chv_pipe_power_well_enabled,
};

static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = chv_dpio_cmn_power_well_enable,
	.disable = chv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static struct i915_power_well i9xx_always_on_power_well[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
	},
};

static const struct i915_power_well_ops hsw_power_well_ops = {
	.sync_hw = hsw_power_well_sync_hw,
	.enable = hsw_power_well_enable,
	.disable = hsw_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};

static const struct i915_power_well_ops skl_power_well_ops = {
	.sync_hw = skl_power_well_sync_hw,
	.enable = skl_power_well_enable,
	.disable = skl_power_well_disable,
	.is_enabled = skl_power_well_enabled,
};

static const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
	.sync_hw = gen9_dc_off_power_well_sync_hw,
	.enable = gen9_dc_off_power_well_enable,
	.disable = gen9_dc_off_power_well_disable,
	.is_enabled = gen9_dc_off_power_well_enabled,
};

static struct i915_power_well hsw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = HSW_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = HSW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
	},
};

static struct i915_power_well bdw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = BDW_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = BDW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
	},
};

static const struct i915_power_well_ops vlv_display_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_display_power_well_enable,
	.disable = vlv_display_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_dpio_cmn_power_well_enable,
	.disable = vlv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_power_well_enable,
	.disable = vlv_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static struct i915_power_well vlv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = VLV_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
		.data = PUNIT_POWER_WELL_ALWAYS_ON,
	},
	{
		.name = "display",
		.domains = VLV_DISPLAY_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DISP2D,
		.ops = &vlv_display_power_well_ops,
	},
	{
		.name = "dpio-tx-b-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
	},
	{
		.name = "dpio-tx-b-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
	},
	{
		.name = "dpio-tx-c-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
	},
	{
		.name = "dpio-tx-c-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
	},
	{
		.name = "dpio-common",
		.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DPIO_CMN_BC,
		.ops = &vlv_dpio_cmn_power_well_ops,
	},
};

static struct i915_power_well chv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = VLV_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		/*
		 * Pipe A power well is the new disp2d well. Pipe B and C
		 * power wells don't actually exist. Pipe A power well is
		 * required for any pipe to work.
static struct i915_power_well chv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = VLV_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		/*
		 * Pipe A power well is the new disp2d well. Pipe B and C
		 * power wells don't actually exist. Pipe A power well is
		 * required for any pipe to work.
		 */
		.domains = VLV_DISPLAY_POWER_DOMAINS,
		.data = PIPE_A,
		.ops = &chv_pipe_power_well_ops,
	},
	{
		.name = "dpio-common-bc",
		.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DPIO_CMN_BC,
		.ops = &chv_dpio_cmn_power_well_ops,
	},
	{
		.name = "dpio-common-d",
		.domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DPIO_CMN_D,
		.ops = &chv_dpio_cmn_power_well_ops,
	},
};

bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
					 int power_well_id)
{
	struct i915_power_well *power_well;
	bool ret;

	power_well = lookup_power_well(dev_priv, power_well_id);
	ret = power_well->ops->is_enabled(dev_priv, power_well);

	return ret;
}

static struct i915_power_well skl_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = SKL_DISPLAY_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
		.data = SKL_DISP_PW_ALWAYS_ON,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.domains = 0,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_1,
	},
	{
		.name = "MISC IO power well",
		/* Handled by the DMC firmware */
		.domains = 0,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_MISC_IO,
	},
	{
		.name = "DC off",
		.domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.data = SKL_DISP_PW_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_2,
	},
	{
		.name = "DDI A/E power well",
		.domains = SKL_DISPLAY_DDI_A_E_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_DDI_A_E,
	},
	{
		.name = "DDI B power well",
		.domains = SKL_DISPLAY_DDI_B_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_DDI_B,
	},
	{
		.name = "DDI C power well",
		.domains = SKL_DISPLAY_DDI_C_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_DDI_C,
	},
	{
		.name = "DDI D power well",
		.domains = SKL_DISPLAY_DDI_D_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_DDI_D,
	},
};

void skl_pw1_misc_io_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *well;

	if (!(IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)))
		return;

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
	intel_power_well_enable(dev_priv, well);
}

void skl_pw1_misc_io_fini(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *well;

	if (!(IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)))
		return;

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
	intel_power_well_disable(dev_priv, well);
}
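/*
 * Illustrative usage sketch: both helpers above expect the caller to hold
 * the power domains lock, as skl_display_core_init()/_uninit() below do:
 *
 *	mutex_lock(&power_domains->lock);
 *	skl_pw1_misc_io_init(dev_priv);
 *	mutex_unlock(&power_domains->lock);
 */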
static struct i915_power_well bxt_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = BXT_DISPLAY_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "power well 1",
		.domains = BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_1,
	},
	{
		.name = "DC off",
		.domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.data = SKL_DISP_PW_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_2,
	},
};

static int
sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
				   int disable_power_well)
{
	if (disable_power_well >= 0)
		return !!disable_power_well;

	if (IS_BROXTON(dev_priv)) {
		DRM_DEBUG_KMS("Disabling display power well support\n");
		return 0;
	}

	return 1;
}

#define set_power_wells(power_domains, __power_wells) ({		\
	(power_domains)->power_wells = (__power_wells);			\
	(power_domains)->power_well_count = ARRAY_SIZE(__power_wells);	\
})

/**
 * intel_power_domains_init - initializes the power domain structures
 * @dev_priv: i915 device instance
 *
 * Initializes the power domain structures for @dev_priv depending upon the
 * supported platform.
 */
int intel_power_domains_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	i915.disable_power_well = sanitize_disable_power_well_option(dev_priv,
						     i915.disable_power_well);

	BUILD_BUG_ON(POWER_DOMAIN_NUM > 31);

	mutex_init(&power_domains->lock);

	/*
	 * The enabling order will be from lower to higher indexed wells,
	 * the disabling order is reversed.
	 */
	if (IS_HASWELL(dev_priv->dev)) {
		set_power_wells(power_domains, hsw_power_wells);
	} else if (IS_BROADWELL(dev_priv->dev)) {
		set_power_wells(power_domains, bdw_power_wells);
	} else if (IS_SKYLAKE(dev_priv->dev) || IS_KABYLAKE(dev_priv->dev)) {
		set_power_wells(power_domains, skl_power_wells);
	} else if (IS_BROXTON(dev_priv->dev)) {
		set_power_wells(power_domains, bxt_power_wells);
	} else if (IS_CHERRYVIEW(dev_priv->dev)) {
		set_power_wells(power_domains, chv_power_wells);
	} else if (IS_VALLEYVIEW(dev_priv->dev)) {
		set_power_wells(power_domains, vlv_power_wells);
	} else {
		set_power_wells(power_domains, i9xx_always_on_power_well);
	}

	return 0;
}
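/*
 * Illustrative sketch (hypothetical caller, not code from this file): once
 * intel_power_domains_init() has picked the platform's power well table,
 * display code keeps the relevant wells powered by holding abstract domain
 * references, e.g.:
 *
 *	intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
 *	... program pipe A while its wells are guaranteed to be on ...
 *	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
 */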
/**
 * intel_power_domains_fini - finalizes the power domain structures
 * @dev_priv: i915 device instance
 *
 * Finalizes the power domain structures for @dev_priv depending upon the
 * supported platform. This function also disables runtime pm and ensures that
 * the device stays powered up so that the driver can be reloaded.
 */
void intel_power_domains_fini(struct drm_i915_private *dev_priv)
{
	struct device *device = &dev_priv->dev->pdev->dev;

	/*
	 * The i915.ko module is still not prepared to be loaded when
	 * the power well is not enabled, so just enable it in case
	 * we're going to unload/reload.
	 * The following also reacquires the RPM reference the core passed
	 * to the driver during loading, which is dropped in
	 * intel_runtime_pm_enable(). We have to hand back the control of the
	 * device to the core with this reference held.
	 */
	intel_display_set_init_power(dev_priv, true);

	/* Remove the refcount we took to keep power well support disabled. */
	if (!i915.disable_power_well)
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

	/*
	 * Remove the refcount we took in intel_runtime_pm_enable() in case
	 * the platform doesn't support runtime PM.
	 */
	if (!HAS_RUNTIME_PM(dev_priv))
		pm_runtime_put(device);
}

static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;
	int i;

	mutex_lock(&power_domains->lock);
	for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
		power_well->ops->sync_hw(dev_priv, power_well);
		power_well->hw_enabled = power_well->ops->is_enabled(dev_priv,
								     power_well);
	}
	mutex_unlock(&power_domains->lock);
}

static void skl_display_core_init(struct drm_i915_private *dev_priv,
				  bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	uint32_t val;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* enable PCH reset handshake */
	val = I915_READ(HSW_NDE_RSTWRN_OPT);
	I915_WRITE(HSW_NDE_RSTWRN_OPT, val | RESET_PCH_HANDSHAKE_ENABLE);

	/* enable PG1 and Misc I/O */
	mutex_lock(&power_domains->lock);
	skl_pw1_misc_io_init(dev_priv);
	mutex_unlock(&power_domains->lock);

	if (!resume)
		return;

	skl_init_cdclk(dev_priv);

	if (dev_priv->csr.dmc_payload)
		intel_csr_load_program(dev_priv);
}

static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	skl_uninit_cdclk(dev_priv);

	/* The spec doesn't call for removing the reset handshake flag */

	/* disable PG1 and Misc I/O */
	mutex_lock(&power_domains->lock);
	skl_pw1_misc_io_fini(dev_priv);
	mutex_unlock(&power_domains->lock);
}
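/*
 * Illustrative pairing sketch (flow reconstructed from the callers below):
 * intel_power_domains_init_hw() brings the SKL display core up, and
 * intel_power_domains_suspend() tears it down again:
 *
 *	skl_display_core_init(dev_priv, resume);
 *	...
 *	skl_display_core_uninit(dev_priv);
 */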
static void chv_phy_control_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D);

	/*
	 * DISPLAY_PHY_CONTROL can get corrupted if read. As a
	 * workaround never ever read DISPLAY_PHY_CONTROL, and
	 * instead maintain a shadow copy ourselves. Use the actual
	 * power well state and lane status to reconstruct the
	 * expected initial value.
	 */
	dev_priv->chv_phy_control =
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);

	/*
	 * If all lanes are disabled we leave the override disabled
	 * with all power down bits cleared to match the state we
	 * would use after disabling the port. Otherwise enable the
	 * override and set the lane powerdown bits according to the
	 * current lane status.
	 */
	if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) {
		uint32_t status = I915_READ(DPLL(PIPE_A));
		unsigned int mask;

		mask = status & DPLL_PORTB_READY_MASK;
		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);

		mask = (status & DPLL_PORTC_READY_MASK) >> 4;
		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);

		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);

		dev_priv->chv_phy_assert[DPIO_PHY0] = false;
	} else {
		dev_priv->chv_phy_assert[DPIO_PHY0] = true;
	}

	if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) {
		uint32_t status = I915_READ(DPIO_PHY_STATUS);
		unsigned int mask;

		mask = status & DPLL_PORTD_READY_MASK;

		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);

		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);

		dev_priv->chv_phy_assert[DPIO_PHY1] = false;
	} else {
		dev_priv->chv_phy_assert[DPIO_PHY1] = true;
	}

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Initial PHY_CONTROL=0x%08x\n",
		      dev_priv->chv_phy_control);
}

static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
	struct i915_power_well *disp2d =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D);

	/* If the display might be already active skip this */
	if (cmn->ops->is_enabled(dev_priv, cmn) &&
	    disp2d->ops->is_enabled(dev_priv, disp2d) &&
	    I915_READ(DPIO_CTL) & DPIO_CMNRST)
		return;

	DRM_DEBUG_KMS("toggling display PHY side reset\n");

	/* cmnlane needs DPLL registers */
	disp2d->ops->enable(dev_priv, disp2d);

	/*
	 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
	 * Need to assert and de-assert PHY SB reset by gating the
	 * common lane power, then un-gating it.
	 * Simply ungating isn't enough to reset the PHY enough to get
	 * ports and lanes running.
	 */
	cmn->ops->disable(dev_priv, cmn);
}
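/*
 * Illustrative sketch of the reset toggle above (assumption, not code from
 * this file: the common lane well is re-enabled later, once a user takes a
 * reference on one of its power domains again):
 *
 *	disp2d->ops->enable(dev_priv, disp2d);	  DPLL registers accessible
 *	cmn->ops->disable(dev_priv, cmn);	  gate lane power, assert reset
 *	...
 *	cmn->ops->enable(dev_priv, cmn);	  un-gate power, de-assert reset
 */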
/**
 * intel_power_domains_init_hw - initialize hardware power domain state
 * @dev_priv: i915 device instance
 * @resume: true if this is called while resuming from system suspend
 *
 * This function initializes the hardware power domain state and enables all
 * power domains using intel_display_set_init_power().
 */
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
{
	struct drm_device *dev = dev_priv->dev;
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	power_domains->initializing = true;

	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
		skl_display_core_init(dev_priv, resume);
	} else if (IS_CHERRYVIEW(dev)) {
		mutex_lock(&power_domains->lock);
		chv_phy_control_init(dev_priv);
		mutex_unlock(&power_domains->lock);
	} else if (IS_VALLEYVIEW(dev)) {
		mutex_lock(&power_domains->lock);
		vlv_cmnlane_wa(dev_priv);
		mutex_unlock(&power_domains->lock);
	}

	/* For now, we need the power well to be always enabled. */
	intel_display_set_init_power(dev_priv, true);
	/* Disable power well support if the user asked for it. */
	if (!i915.disable_power_well)
		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
	intel_power_domains_sync_hw(dev_priv);
	power_domains->initializing = false;
}

/**
 * intel_power_domains_suspend - suspend power domain state
 * @dev_priv: i915 device instance
 *
 * This function prepares the hardware power domain state before entering
 * system suspend. It must be paired with intel_power_domains_init_hw().
 */
void intel_power_domains_suspend(struct drm_i915_private *dev_priv)
{
	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
		skl_display_core_uninit(dev_priv);

	/*
	 * Even if power well support was disabled we still want to disable
	 * power wells while we are system suspended.
	 */
	if (!i915.disable_power_well)
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
}

/**
 * intel_runtime_pm_get - grab a runtime pm reference
 * @dev_priv: i915 device instance
 *
 * This function grabs a device-level runtime pm reference (mostly used for GEM
 * code to ensure the GTT or GT is on) and ensures that it is powered up.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 */
void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct device *device = &dev->pdev->dev;

	pm_runtime_get_sync(device);

	atomic_inc(&dev_priv->pm.wakeref_count);
	assert_rpm_wakelock_held(dev_priv);
}
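/*
 * Illustrative usage sketch (hypothetical caller, not code from this file):
 *
 *	intel_runtime_pm_get(dev_priv);
 *
 *	... the device is now guaranteed to be powered up, so it is
 *	    safe to touch hardware registers ...
 *
 *	intel_runtime_pm_put(dev_priv);
 */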
2257 */ 2258 void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv) 2259 { 2260 struct drm_device *dev = dev_priv->dev; 2261 struct device *device = &dev->pdev->dev; 2262 2263 assert_rpm_wakelock_held(dev_priv); 2264 pm_runtime_get_noresume(device); 2265 2266 atomic_inc(&dev_priv->pm.wakeref_count); 2267 } 2268 2269 /** 2270 * intel_runtime_pm_put - release a runtime pm reference 2271 * @dev_priv: i915 device instance 2272 * 2273 * This function drops the device-level runtime pm reference obtained by 2274 * intel_runtime_pm_get() and might power down the corresponding 2275 * hardware block right away if this is the last reference. 2276 */ 2277 void intel_runtime_pm_put(struct drm_i915_private *dev_priv) 2278 { 2279 struct drm_device *dev = dev_priv->dev; 2280 struct device *device = &dev->pdev->dev; 2281 2282 assert_rpm_wakelock_held(dev_priv); 2283 if (atomic_dec_and_test(&dev_priv->pm.wakeref_count)) 2284 atomic_inc(&dev_priv->pm.atomic_seq); 2285 2286 pm_runtime_mark_last_busy(device); 2287 pm_runtime_put_autosuspend(device); 2288 } 2289 2290 /** 2291 * intel_runtime_pm_enable - enable runtime pm 2292 * @dev_priv: i915 device instance 2293 * 2294 * This function enables runtime pm at the end of the driver load sequence. 2295 * 2296 * Note that this function does currently not enable runtime pm for the 2297 * subordinate display power domains. That is only done on the first modeset 2298 * using intel_display_set_init_power(). 2299 */ 2300 void intel_runtime_pm_enable(struct drm_i915_private *dev_priv) 2301 { 2302 struct drm_device *dev = dev_priv->dev; 2303 struct device *device = &dev->pdev->dev; 2304 2305 pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */ 2306 pm_runtime_mark_last_busy(device); 2307 2308 /* 2309 * Take a permanent reference to disable the RPM functionality and drop 2310 * it only when unloading the driver. Use the low level get/put helpers, 2311 * so the driver's own RPM reference tracking asserts also work on 2312 * platforms without RPM support. 2313 */ 2314 if (!HAS_RUNTIME_PM(dev)) { 2315 pm_runtime_dont_use_autosuspend(device); 2316 pm_runtime_get_sync(device); 2317 } else { 2318 pm_runtime_use_autosuspend(device); 2319 } 2320 2321 /* 2322 * The core calls the driver load handler with an RPM reference held. 2323 * We drop that here and will reacquire it during unloading in 2324 * intel_power_domains_fini(). 2325 */ 2326 pm_runtime_put_autosuspend(device); 2327 } 2328 2329