1 /* SPDX-License-Identifier: MIT */ 2 /* 3 * Copyright © 2019 Intel Corporation 4 */ 5 6 #include <linux/string_helpers.h> 7 8 #include "i915_drv.h" 9 #include "i915_irq.h" 10 #include "i915_reg.h" 11 #include "intel_backlight_regs.h" 12 #include "intel_cdclk.h" 13 #include "intel_clock_gating.h" 14 #include "intel_combo_phy.h" 15 #include "intel_de.h" 16 #include "intel_display_power.h" 17 #include "intel_display_power_map.h" 18 #include "intel_display_power_well.h" 19 #include "intel_display_rpm.h" 20 #include "intel_display_types.h" 21 #include "intel_dmc.h" 22 #include "intel_mchbar_regs.h" 23 #include "intel_pch_refclk.h" 24 #include "intel_pcode.h" 25 #include "intel_pmdemand.h" 26 #include "intel_pps_regs.h" 27 #include "intel_snps_phy.h" 28 #include "skl_watermark.h" 29 #include "skl_watermark_regs.h" 30 #include "vlv_sideband.h" 31 32 #define for_each_power_domain_well(__display, __power_well, __domain) \ 33 for_each_power_well((__display), __power_well) \ 34 for_each_if(test_bit((__domain), (__power_well)->domains.bits)) 35 36 #define for_each_power_domain_well_reverse(__display, __power_well, __domain) \ 37 for_each_power_well_reverse((__display), __power_well) \ 38 for_each_if(test_bit((__domain), (__power_well)->domains.bits)) 39 40 static const char * 41 intel_display_power_domain_str(enum intel_display_power_domain domain) 42 { 43 switch (domain) { 44 case POWER_DOMAIN_DISPLAY_CORE: 45 return "DISPLAY_CORE"; 46 case POWER_DOMAIN_PIPE_A: 47 return "PIPE_A"; 48 case POWER_DOMAIN_PIPE_B: 49 return "PIPE_B"; 50 case POWER_DOMAIN_PIPE_C: 51 return "PIPE_C"; 52 case POWER_DOMAIN_PIPE_D: 53 return "PIPE_D"; 54 case POWER_DOMAIN_PIPE_PANEL_FITTER_A: 55 return "PIPE_PANEL_FITTER_A"; 56 case POWER_DOMAIN_PIPE_PANEL_FITTER_B: 57 return "PIPE_PANEL_FITTER_B"; 58 case POWER_DOMAIN_PIPE_PANEL_FITTER_C: 59 return "PIPE_PANEL_FITTER_C"; 60 case POWER_DOMAIN_PIPE_PANEL_FITTER_D: 61 return "PIPE_PANEL_FITTER_D"; 62 case POWER_DOMAIN_TRANSCODER_A: 63 return "TRANSCODER_A"; 64 case POWER_DOMAIN_TRANSCODER_B: 65 return "TRANSCODER_B"; 66 case POWER_DOMAIN_TRANSCODER_C: 67 return "TRANSCODER_C"; 68 case POWER_DOMAIN_TRANSCODER_D: 69 return "TRANSCODER_D"; 70 case POWER_DOMAIN_TRANSCODER_EDP: 71 return "TRANSCODER_EDP"; 72 case POWER_DOMAIN_TRANSCODER_DSI_A: 73 return "TRANSCODER_DSI_A"; 74 case POWER_DOMAIN_TRANSCODER_DSI_C: 75 return "TRANSCODER_DSI_C"; 76 case POWER_DOMAIN_TRANSCODER_VDSC_PW2: 77 return "TRANSCODER_VDSC_PW2"; 78 case POWER_DOMAIN_PORT_DDI_LANES_A: 79 return "PORT_DDI_LANES_A"; 80 case POWER_DOMAIN_PORT_DDI_LANES_B: 81 return "PORT_DDI_LANES_B"; 82 case POWER_DOMAIN_PORT_DDI_LANES_C: 83 return "PORT_DDI_LANES_C"; 84 case POWER_DOMAIN_PORT_DDI_LANES_D: 85 return "PORT_DDI_LANES_D"; 86 case POWER_DOMAIN_PORT_DDI_LANES_E: 87 return "PORT_DDI_LANES_E"; 88 case POWER_DOMAIN_PORT_DDI_LANES_F: 89 return "PORT_DDI_LANES_F"; 90 case POWER_DOMAIN_PORT_DDI_LANES_TC1: 91 return "PORT_DDI_LANES_TC1"; 92 case POWER_DOMAIN_PORT_DDI_LANES_TC2: 93 return "PORT_DDI_LANES_TC2"; 94 case POWER_DOMAIN_PORT_DDI_LANES_TC3: 95 return "PORT_DDI_LANES_TC3"; 96 case POWER_DOMAIN_PORT_DDI_LANES_TC4: 97 return "PORT_DDI_LANES_TC4"; 98 case POWER_DOMAIN_PORT_DDI_LANES_TC5: 99 return "PORT_DDI_LANES_TC5"; 100 case POWER_DOMAIN_PORT_DDI_LANES_TC6: 101 return "PORT_DDI_LANES_TC6"; 102 case POWER_DOMAIN_PORT_DDI_IO_A: 103 return "PORT_DDI_IO_A"; 104 case POWER_DOMAIN_PORT_DDI_IO_B: 105 return "PORT_DDI_IO_B"; 106 case POWER_DOMAIN_PORT_DDI_IO_C: 107 return "PORT_DDI_IO_C"; 108 case POWER_DOMAIN_PORT_DDI_IO_D: 
109 return "PORT_DDI_IO_D"; 110 case POWER_DOMAIN_PORT_DDI_IO_E: 111 return "PORT_DDI_IO_E"; 112 case POWER_DOMAIN_PORT_DDI_IO_F: 113 return "PORT_DDI_IO_F"; 114 case POWER_DOMAIN_PORT_DDI_IO_TC1: 115 return "PORT_DDI_IO_TC1"; 116 case POWER_DOMAIN_PORT_DDI_IO_TC2: 117 return "PORT_DDI_IO_TC2"; 118 case POWER_DOMAIN_PORT_DDI_IO_TC3: 119 return "PORT_DDI_IO_TC3"; 120 case POWER_DOMAIN_PORT_DDI_IO_TC4: 121 return "PORT_DDI_IO_TC4"; 122 case POWER_DOMAIN_PORT_DDI_IO_TC5: 123 return "PORT_DDI_IO_TC5"; 124 case POWER_DOMAIN_PORT_DDI_IO_TC6: 125 return "PORT_DDI_IO_TC6"; 126 case POWER_DOMAIN_PORT_DSI: 127 return "PORT_DSI"; 128 case POWER_DOMAIN_PORT_CRT: 129 return "PORT_CRT"; 130 case POWER_DOMAIN_PORT_OTHER: 131 return "PORT_OTHER"; 132 case POWER_DOMAIN_VGA: 133 return "VGA"; 134 case POWER_DOMAIN_AUDIO_MMIO: 135 return "AUDIO_MMIO"; 136 case POWER_DOMAIN_AUDIO_PLAYBACK: 137 return "AUDIO_PLAYBACK"; 138 case POWER_DOMAIN_AUX_IO_A: 139 return "AUX_IO_A"; 140 case POWER_DOMAIN_AUX_IO_B: 141 return "AUX_IO_B"; 142 case POWER_DOMAIN_AUX_IO_C: 143 return "AUX_IO_C"; 144 case POWER_DOMAIN_AUX_IO_D: 145 return "AUX_IO_D"; 146 case POWER_DOMAIN_AUX_IO_E: 147 return "AUX_IO_E"; 148 case POWER_DOMAIN_AUX_IO_F: 149 return "AUX_IO_F"; 150 case POWER_DOMAIN_AUX_A: 151 return "AUX_A"; 152 case POWER_DOMAIN_AUX_B: 153 return "AUX_B"; 154 case POWER_DOMAIN_AUX_C: 155 return "AUX_C"; 156 case POWER_DOMAIN_AUX_D: 157 return "AUX_D"; 158 case POWER_DOMAIN_AUX_E: 159 return "AUX_E"; 160 case POWER_DOMAIN_AUX_F: 161 return "AUX_F"; 162 case POWER_DOMAIN_AUX_USBC1: 163 return "AUX_USBC1"; 164 case POWER_DOMAIN_AUX_USBC2: 165 return "AUX_USBC2"; 166 case POWER_DOMAIN_AUX_USBC3: 167 return "AUX_USBC3"; 168 case POWER_DOMAIN_AUX_USBC4: 169 return "AUX_USBC4"; 170 case POWER_DOMAIN_AUX_USBC5: 171 return "AUX_USBC5"; 172 case POWER_DOMAIN_AUX_USBC6: 173 return "AUX_USBC6"; 174 case POWER_DOMAIN_AUX_TBT1: 175 return "AUX_TBT1"; 176 case POWER_DOMAIN_AUX_TBT2: 177 return "AUX_TBT2"; 178 case POWER_DOMAIN_AUX_TBT3: 179 return "AUX_TBT3"; 180 case POWER_DOMAIN_AUX_TBT4: 181 return "AUX_TBT4"; 182 case POWER_DOMAIN_AUX_TBT5: 183 return "AUX_TBT5"; 184 case POWER_DOMAIN_AUX_TBT6: 185 return "AUX_TBT6"; 186 case POWER_DOMAIN_GMBUS: 187 return "GMBUS"; 188 case POWER_DOMAIN_INIT: 189 return "INIT"; 190 case POWER_DOMAIN_GT_IRQ: 191 return "GT_IRQ"; 192 case POWER_DOMAIN_DC_OFF: 193 return "DC_OFF"; 194 case POWER_DOMAIN_TC_COLD_OFF: 195 return "TC_COLD_OFF"; 196 default: 197 MISSING_CASE(domain); 198 return "?"; 199 } 200 } 201 202 static bool __intel_display_power_is_enabled(struct intel_display *display, 203 enum intel_display_power_domain domain) 204 { 205 struct i915_power_well *power_well; 206 bool is_enabled; 207 208 if (intel_display_rpm_suspended(display)) 209 return false; 210 211 is_enabled = true; 212 213 for_each_power_domain_well_reverse(display, power_well, domain) { 214 if (intel_power_well_is_always_on(power_well)) 215 continue; 216 217 if (!intel_power_well_is_enabled_cached(power_well)) { 218 is_enabled = false; 219 break; 220 } 221 } 222 223 return is_enabled; 224 } 225 226 /** 227 * intel_display_power_is_enabled - check for a power domain 228 * @display: display device instance 229 * @domain: power domain to check 230 * 231 * This function can be used to check the hw power domain state. It is mostly 232 * used in hardware state readout functions. Everywhere else code should rely 233 * upon explicit power domain reference counting to ensure that the hardware 234 * block is powered up before accessing it. 
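 *
 * A minimal readout-style sketch (hypothetical caller, checking a pipe's
 * power domain before touching its registers; note the locking requirement
 * below):
 *
 *	if (!intel_display_power_is_enabled(display, POWER_DOMAIN_PIPE_A))
 *		return false;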
235 * 236 * Callers must hold the relevant modesetting locks to ensure that concurrent 237 * threads can't disable the power well while the caller tries to read a few 238 * registers. 239 * 240 * Returns: 241 * True when the power domain is enabled, false otherwise. 242 */ 243 bool intel_display_power_is_enabled(struct intel_display *display, 244 enum intel_display_power_domain domain) 245 { 246 struct i915_power_domains *power_domains = &display->power.domains; 247 bool ret; 248 249 mutex_lock(&power_domains->lock); 250 ret = __intel_display_power_is_enabled(display, domain); 251 mutex_unlock(&power_domains->lock); 252 253 return ret; 254 } 255 256 static u32 257 sanitize_target_dc_state(struct intel_display *display, 258 u32 target_dc_state) 259 { 260 struct i915_power_domains *power_domains = &display->power.domains; 261 static const u32 states[] = { 262 DC_STATE_EN_UPTO_DC6, 263 DC_STATE_EN_UPTO_DC5, 264 DC_STATE_EN_DC3CO, 265 DC_STATE_DISABLE, 266 }; 267 int i; 268 269 for (i = 0; i < ARRAY_SIZE(states) - 1; i++) { 270 if (target_dc_state != states[i]) 271 continue; 272 273 if (power_domains->allowed_dc_mask & target_dc_state) 274 break; 275 276 target_dc_state = states[i + 1]; 277 } 278 279 return target_dc_state; 280 } 281 282 /** 283 * intel_display_power_set_target_dc_state - Set target dc state. 284 * @display: display device 285 * @state: state which needs to be set as target_dc_state. 286 * 287 * This function sets the "DC off" power well target_dc_state; based upon 288 * this target_dc_state, the "DC off" power well will enable the desired 289 * DC state. 290 */ 291 void intel_display_power_set_target_dc_state(struct intel_display *display, 292 u32 state) 293 { 294 struct i915_power_well *power_well; 295 bool dc_off_enabled; 296 struct i915_power_domains *power_domains = &display->power.domains; 297 298 mutex_lock(&power_domains->lock); 299 power_well = lookup_power_well(display, SKL_DISP_DC_OFF); 300 301 if (drm_WARN_ON(display->drm, !power_well)) 302 goto unlock; 303 304 state = sanitize_target_dc_state(display, state); 305 306 if (state == power_domains->target_dc_state) 307 goto unlock; 308 309 dc_off_enabled = intel_power_well_is_enabled(display, power_well); 310 /* 311 * If DC off power well is disabled, need to enable and disable the 312 * DC off power well to effect target DC state. 313 */ 314 if (!dc_off_enabled) 315 intel_power_well_enable(display, power_well); 316 317 power_domains->target_dc_state = state; 318 319 if (!dc_off_enabled) 320 intel_power_well_disable(display, power_well); 321 322 unlock: 323 mutex_unlock(&power_domains->lock); 324 } 325 326 /** 327 * intel_display_power_get_current_dc_state - Get the current DC state. 328 * @display: display device 329 * 330 * This function returns the DC state the hardware is currently allowed to 331 * enter: DC_STATE_DISABLE while the "DC off" power well is enabled, the 332 * target_dc_state otherwise. 333 */ 334 u32 intel_display_power_get_current_dc_state(struct intel_display *display) 335 { 336 struct i915_power_well *power_well; 337 struct i915_power_domains *power_domains = &display->power.domains; 338 u32 current_dc_state = DC_STATE_DISABLE; 339 340 mutex_lock(&power_domains->lock); 341 power_well = lookup_power_well(display, SKL_DISP_DC_OFF); 342 343 if (drm_WARN_ON(display->drm, !power_well)) 344 goto unlock; 345 346 current_dc_state = intel_power_well_is_enabled(display, power_well) ?
347 DC_STATE_DISABLE : power_domains->target_dc_state; 348 349 unlock: 350 mutex_unlock(&power_domains->lock); 351 352 return current_dc_state; 353 } 354 355 static void __async_put_domains_mask(struct i915_power_domains *power_domains, 356 struct intel_power_domain_mask *mask) 357 { 358 bitmap_or(mask->bits, 359 power_domains->async_put_domains[0].bits, 360 power_domains->async_put_domains[1].bits, 361 POWER_DOMAIN_NUM); 362 } 363 364 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) 365 366 static bool 367 assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains) 368 { 369 struct intel_display *display = container_of(power_domains, 370 struct intel_display, 371 power.domains); 372 373 return !drm_WARN_ON(display->drm, 374 bitmap_intersects(power_domains->async_put_domains[0].bits, 375 power_domains->async_put_domains[1].bits, 376 POWER_DOMAIN_NUM)); 377 } 378 379 static bool 380 __async_put_domains_state_ok(struct i915_power_domains *power_domains) 381 { 382 struct intel_display *display = container_of(power_domains, 383 struct intel_display, 384 power.domains); 385 struct intel_power_domain_mask async_put_mask; 386 enum intel_display_power_domain domain; 387 bool err = false; 388 389 err |= !assert_async_put_domain_masks_disjoint(power_domains); 390 __async_put_domains_mask(power_domains, &async_put_mask); 391 err |= drm_WARN_ON(display->drm, 392 !!power_domains->async_put_wakeref != 393 !bitmap_empty(async_put_mask.bits, POWER_DOMAIN_NUM)); 394 395 for_each_power_domain(domain, &async_put_mask) 396 err |= drm_WARN_ON(display->drm, 397 power_domains->domain_use_count[domain] != 1); 398 399 return !err; 400 } 401 402 static void print_power_domains(struct i915_power_domains *power_domains, 403 const char *prefix, struct intel_power_domain_mask *mask) 404 { 405 struct intel_display *display = container_of(power_domains, 406 struct intel_display, 407 power.domains); 408 enum intel_display_power_domain domain; 409 410 drm_dbg_kms(display->drm, "%s (%d):\n", prefix, bitmap_weight(mask->bits, POWER_DOMAIN_NUM)); 411 for_each_power_domain(domain, mask) 412 drm_dbg_kms(display->drm, "%s use_count %d\n", 413 intel_display_power_domain_str(domain), 414 power_domains->domain_use_count[domain]); 415 } 416 417 static void 418 print_async_put_domains_state(struct i915_power_domains *power_domains) 419 { 420 struct intel_display *display = container_of(power_domains, 421 struct intel_display, 422 power.domains); 423 424 drm_dbg_kms(display->drm, "async_put_wakeref: %s\n", 425 str_yes_no(power_domains->async_put_wakeref)); 426 427 print_power_domains(power_domains, "async_put_domains[0]", 428 &power_domains->async_put_domains[0]); 429 print_power_domains(power_domains, "async_put_domains[1]", 430 &power_domains->async_put_domains[1]); 431 } 432 433 static void 434 verify_async_put_domains_state(struct i915_power_domains *power_domains) 435 { 436 if (!__async_put_domains_state_ok(power_domains)) 437 print_async_put_domains_state(power_domains); 438 } 439 440 #else 441 442 static void 443 assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains) 444 { 445 } 446 447 static void 448 verify_async_put_domains_state(struct i915_power_domains *power_domains) 449 { 450 } 451 452 #endif /* CONFIG_DRM_I915_DEBUG_RUNTIME_PM */ 453 454 static void async_put_domains_mask(struct i915_power_domains *power_domains, 455 struct intel_power_domain_mask *mask) 456 457 { 458 assert_async_put_domain_masks_disjoint(power_domains); 459 460 __async_put_domains_mask(power_domains, 
mask); 461 } 462 463 static void 464 async_put_domains_clear_domain(struct i915_power_domains *power_domains, 465 enum intel_display_power_domain domain) 466 { 467 assert_async_put_domain_masks_disjoint(power_domains); 468 469 clear_bit(domain, power_domains->async_put_domains[0].bits); 470 clear_bit(domain, power_domains->async_put_domains[1].bits); 471 } 472 473 static void 474 cancel_async_put_work(struct i915_power_domains *power_domains, bool sync) 475 { 476 if (sync) 477 cancel_delayed_work_sync(&power_domains->async_put_work); 478 else 479 cancel_delayed_work(&power_domains->async_put_work); 480 481 power_domains->async_put_next_delay = 0; 482 } 483 484 static bool 485 intel_display_power_grab_async_put_ref(struct intel_display *display, 486 enum intel_display_power_domain domain) 487 { 488 struct i915_power_domains *power_domains = &display->power.domains; 489 struct intel_power_domain_mask async_put_mask; 490 bool ret = false; 491 492 async_put_domains_mask(power_domains, &async_put_mask); 493 if (!test_bit(domain, async_put_mask.bits)) 494 goto out_verify; 495 496 async_put_domains_clear_domain(power_domains, domain); 497 498 ret = true; 499 500 async_put_domains_mask(power_domains, &async_put_mask); 501 if (!bitmap_empty(async_put_mask.bits, POWER_DOMAIN_NUM)) 502 goto out_verify; 503 504 cancel_async_put_work(power_domains, false); 505 intel_display_rpm_put_raw(display, 506 fetch_and_zero(&power_domains->async_put_wakeref)); 507 out_verify: 508 verify_async_put_domains_state(power_domains); 509 510 return ret; 511 } 512 513 static void 514 __intel_display_power_get_domain(struct intel_display *display, 515 enum intel_display_power_domain domain) 516 { 517 struct i915_power_domains *power_domains = &display->power.domains; 518 struct i915_power_well *power_well; 519 520 if (intel_display_power_grab_async_put_ref(display, domain)) 521 return; 522 523 for_each_power_domain_well(display, power_well, domain) 524 intel_power_well_get(display, power_well); 525 526 power_domains->domain_use_count[domain]++; 527 } 528 529 /** 530 * intel_display_power_get - grab a power domain reference 531 * @display: display device instance 532 * @domain: power domain to reference 533 * 534 * This function grabs a power domain reference for @domain and ensures that the 535 * power domain and all its parents are powered up. Therefore users should only 536 * grab a reference to the innermost power domain they need. 537 * 538 * Any power domain reference obtained by this function must have a symmetric 539 * call to intel_display_power_put() to release the reference again. 540 */ 541 intel_wakeref_t intel_display_power_get(struct intel_display *display, 542 enum intel_display_power_domain domain) 543 { 544 struct i915_power_domains *power_domains = &display->power.domains; 545 struct ref_tracker *wakeref; 546 547 wakeref = intel_display_rpm_get(display); 548 549 mutex_lock(&power_domains->lock); 550 __intel_display_power_get_domain(display, domain); 551 mutex_unlock(&power_domains->lock); 552 553 return wakeref; 554 } 555 556 /** 557 * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain 558 * @display: display device instance 559 * @domain: power domain to reference 560 * 561 * This function grabs a power domain reference for @domain and ensures that the 562 * power domain and all its parents are powered up. Therefore users should only 563 * grab a reference to the innermost power domain they need. 
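 *
 * Unlike intel_display_power_get(), the reference is only grabbed if the
 * domain is already enabled; otherwise NULL is returned. A minimal sketch
 * of the usual pattern (hypothetical caller, paired with the put described
 * below):
 *
 *	intel_wakeref_t wakeref;
 *
 *	wakeref = intel_display_power_get_if_enabled(display, domain);
 *	if (!wakeref)
 *		return;
 *	...
 *	intel_display_power_put(display, domain, wakeref);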
564 * 565 * Any power domain reference obtained by this function must have a symmetric 566 * call to intel_display_power_put() to release the reference again. 567 */ 568 intel_wakeref_t 569 intel_display_power_get_if_enabled(struct intel_display *display, 570 enum intel_display_power_domain domain) 571 { 572 struct i915_power_domains *power_domains = &display->power.domains; 573 struct ref_tracker *wakeref; 574 bool is_enabled; 575 576 wakeref = intel_display_rpm_get_if_in_use(display); 577 if (!wakeref) 578 return NULL; 579 580 mutex_lock(&power_domains->lock); 581 582 if (__intel_display_power_is_enabled(display, domain)) { 583 __intel_display_power_get_domain(display, domain); 584 is_enabled = true; 585 } else { 586 is_enabled = false; 587 } 588 589 mutex_unlock(&power_domains->lock); 590 591 if (!is_enabled) { 592 intel_display_rpm_put(display, wakeref); 593 wakeref = NULL; 594 } 595 596 return wakeref; 597 } 598 599 static void 600 __intel_display_power_put_domain(struct intel_display *display, 601 enum intel_display_power_domain domain) 602 { 603 struct i915_power_domains *power_domains = &display->power.domains; 604 struct i915_power_well *power_well; 605 const char *name = intel_display_power_domain_str(domain); 606 struct intel_power_domain_mask async_put_mask; 607 608 drm_WARN(display->drm, !power_domains->domain_use_count[domain], 609 "Use count on domain %s is already zero\n", 610 name); 611 async_put_domains_mask(power_domains, &async_put_mask); 612 drm_WARN(display->drm, 613 test_bit(domain, async_put_mask.bits), 614 "Async disabling of domain %s is pending\n", 615 name); 616 617 power_domains->domain_use_count[domain]--; 618 619 for_each_power_domain_well_reverse(display, power_well, domain) 620 intel_power_well_put(display, power_well); 621 } 622 623 static void __intel_display_power_put(struct intel_display *display, 624 enum intel_display_power_domain domain) 625 { 626 struct i915_power_domains *power_domains = &display->power.domains; 627 628 mutex_lock(&power_domains->lock); 629 __intel_display_power_put_domain(display, domain); 630 mutex_unlock(&power_domains->lock); 631 } 632 633 static void 634 queue_async_put_domains_work(struct i915_power_domains *power_domains, 635 intel_wakeref_t wakeref, 636 int delay_ms) 637 { 638 struct intel_display *display = container_of(power_domains, 639 struct intel_display, 640 power.domains); 641 drm_WARN_ON(display->drm, power_domains->async_put_wakeref); 642 power_domains->async_put_wakeref = wakeref; 643 drm_WARN_ON(display->drm, !queue_delayed_work(system_unbound_wq, 644 &power_domains->async_put_work, 645 msecs_to_jiffies(delay_ms))); 646 } 647 648 static void 649 release_async_put_domains(struct i915_power_domains *power_domains, 650 struct intel_power_domain_mask *mask) 651 { 652 struct intel_display *display = container_of(power_domains, 653 struct intel_display, 654 power.domains); 655 enum intel_display_power_domain domain; 656 struct ref_tracker *wakeref; 657 658 wakeref = intel_display_rpm_get_noresume(display); 659 660 for_each_power_domain(domain, mask) { 661 /* Clear before put, so put's sanity check is happy. 
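 * (__intel_display_power_put_domain() warns if an async disable is still
 * pending for the domain being put.)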
*/ 662 async_put_domains_clear_domain(power_domains, domain); 663 __intel_display_power_put_domain(display, domain); 664 } 665 666 intel_display_rpm_put(display, wakeref); 667 } 668 669 static void 670 intel_display_power_put_async_work(struct work_struct *work) 671 { 672 struct intel_display *display = container_of(work, struct intel_display, 673 power.domains.async_put_work.work); 674 struct i915_power_domains *power_domains = &display->power.domains; 675 struct ref_tracker *new_work_wakeref, *old_work_wakeref = NULL; 676 677 new_work_wakeref = intel_display_rpm_get_raw(display); 678 679 mutex_lock(&power_domains->lock); 680 681 /* 682 * Bail out if all the domain refs pending to be released were grabbed 683 * by subsequent gets or a flush_work. 684 */ 685 old_work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref); 686 if (!old_work_wakeref) 687 goto out_verify; 688 689 release_async_put_domains(power_domains, 690 &power_domains->async_put_domains[0]); 691 692 /* 693 * Cancel the work that got queued after this one got dequeued, 694 * since here we released the corresponding async-put reference. 695 */ 696 cancel_async_put_work(power_domains, false); 697 698 /* Requeue the work if more domains were async put meanwhile. */ 699 if (!bitmap_empty(power_domains->async_put_domains[1].bits, POWER_DOMAIN_NUM)) { 700 bitmap_copy(power_domains->async_put_domains[0].bits, 701 power_domains->async_put_domains[1].bits, 702 POWER_DOMAIN_NUM); 703 bitmap_zero(power_domains->async_put_domains[1].bits, 704 POWER_DOMAIN_NUM); 705 queue_async_put_domains_work(power_domains, 706 fetch_and_zero(&new_work_wakeref), 707 power_domains->async_put_next_delay); 708 power_domains->async_put_next_delay = 0; 709 } 710 711 out_verify: 712 verify_async_put_domains_state(power_domains); 713 714 mutex_unlock(&power_domains->lock); 715 716 if (old_work_wakeref) 717 intel_display_rpm_put_raw(display, old_work_wakeref); 718 if (new_work_wakeref) 719 intel_display_rpm_put_raw(display, new_work_wakeref); 720 } 721 722 /** 723 * __intel_display_power_put_async - release a power domain reference asynchronously 724 * @display: display device instance 725 * @domain: power domain to reference 726 * @wakeref: wakeref acquired for the reference that is being released 727 * @delay_ms: delay of powering down the power domain 728 * 729 * This function drops the power domain reference obtained by 730 * intel_display_power_get*() and schedules a work to power down the 731 * corresponding hardware block if this is the last reference. 732 * The power down is delayed by @delay_ms if this is >= 0, or by a default 733 * 100 ms otherwise. 734 */ 735 void __intel_display_power_put_async(struct intel_display *display, 736 enum intel_display_power_domain domain, 737 intel_wakeref_t wakeref, 738 int delay_ms) 739 { 740 struct i915_power_domains *power_domains = &display->power.domains; 741 struct ref_tracker *work_wakeref; 742 743 work_wakeref = intel_display_rpm_get_raw(display); 744 745 delay_ms = delay_ms >= 0 ? delay_ms : 100; 746 747 mutex_lock(&power_domains->lock); 748 749 if (power_domains->domain_use_count[domain] > 1) { 750 __intel_display_power_put_domain(display, domain); 751 752 goto out_verify; 753 } 754 755 drm_WARN_ON(display->drm, power_domains->domain_use_count[domain] != 1); 756 757 /* Let a pending work requeue itself or queue a new one. 
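 * Domains set in async_put_domains[0] are released by the already queued
 * work; domains added to async_put_domains[1] are picked up when that work
 * requeues itself from intel_display_power_put_async_work().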
*/ 758 if (power_domains->async_put_wakeref) { 759 set_bit(domain, power_domains->async_put_domains[1].bits); 760 power_domains->async_put_next_delay = max(power_domains->async_put_next_delay, 761 delay_ms); 762 } else { 763 set_bit(domain, power_domains->async_put_domains[0].bits); 764 queue_async_put_domains_work(power_domains, 765 fetch_and_zero(&work_wakeref), 766 delay_ms); 767 } 768 769 out_verify: 770 verify_async_put_domains_state(power_domains); 771 772 mutex_unlock(&power_domains->lock); 773 774 if (work_wakeref) 775 intel_display_rpm_put_raw(display, work_wakeref); 776 777 intel_display_rpm_put(display, wakeref); 778 } 779 780 /** 781 * intel_display_power_flush_work - flushes the async display power disabling work 782 * @display: display device instance 783 * 784 * Flushes any pending work that was scheduled by a preceding 785 * intel_display_power_put_async() call, completing the disabling of the 786 * corresponding power domains. 787 * 788 * Note that the work handler function may still be running after this 789 * function returns; to ensure that the work handler isn't running use 790 * intel_display_power_flush_work_sync() instead. 791 */ 792 void intel_display_power_flush_work(struct intel_display *display) 793 { 794 struct i915_power_domains *power_domains = &display->power.domains; 795 struct intel_power_domain_mask async_put_mask; 796 intel_wakeref_t work_wakeref; 797 798 mutex_lock(&power_domains->lock); 799 800 work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref); 801 if (!work_wakeref) 802 goto out_verify; 803 804 async_put_domains_mask(power_domains, &async_put_mask); 805 release_async_put_domains(power_domains, &async_put_mask); 806 cancel_async_put_work(power_domains, false); 807 808 out_verify: 809 verify_async_put_domains_state(power_domains); 810 811 mutex_unlock(&power_domains->lock); 812 813 if (work_wakeref) 814 intel_display_rpm_put_raw(display, work_wakeref); 815 } 816 817 /** 818 * intel_display_power_flush_work_sync - flushes and syncs the async display power disabling work 819 * @display: display device instance 820 * 821 * Like intel_display_power_flush_work(), but also ensure that the work 822 * handler function is not running any more when this function returns. 823 */ 824 static void 825 intel_display_power_flush_work_sync(struct intel_display *display) 826 { 827 struct i915_power_domains *power_domains = &display->power.domains; 828 829 intel_display_power_flush_work(display); 830 cancel_async_put_work(power_domains, true); 831 832 verify_async_put_domains_state(power_domains); 833 834 drm_WARN_ON(display->drm, power_domains->async_put_wakeref); 835 } 836 837 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) 838 /** 839 * intel_display_power_put - release a power domain reference 840 * @display: display device instance 841 * @domain: power domain to reference 842 * @wakeref: wakeref acquired for the reference that is being released 843 * 844 * This function drops the power domain reference obtained by 845 * intel_display_power_get() and might power down the corresponding hardware 846 * block right away if this is the last reference. 
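 *
 * A minimal sketch of the expected symmetric pairing (hypothetical caller,
 * any power domain):
 *
 *	intel_wakeref_t wakeref;
 *
 *	wakeref = intel_display_power_get(display, domain);
 *	...
 *	intel_display_power_put(display, domain, wakeref);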
847 */ 848 void intel_display_power_put(struct intel_display *display, 849 enum intel_display_power_domain domain, 850 intel_wakeref_t wakeref) 851 { 852 __intel_display_power_put(display, domain); 853 intel_display_rpm_put(display, wakeref); 854 } 855 #else 856 /** 857 * intel_display_power_put_unchecked - release an unchecked power domain reference 858 * @display: display device instance 859 * @domain: power domain to reference 860 * 861 * This function drops the power domain reference obtained by 862 * intel_display_power_get() and might power down the corresponding hardware 863 * block right away if this is the last reference. 864 * 865 * This function is only for the power domain code's internal use to suppress wakeref 866 * tracking when the corresponding debug kconfig option is disabled, should not 867 * be used otherwise. 868 */ 869 void intel_display_power_put_unchecked(struct intel_display *display, 870 enum intel_display_power_domain domain) 871 { 872 __intel_display_power_put(display, domain); 873 intel_display_rpm_put_unchecked(display); 874 } 875 #endif 876 877 void 878 intel_display_power_get_in_set(struct intel_display *display, 879 struct intel_display_power_domain_set *power_domain_set, 880 enum intel_display_power_domain domain) 881 { 882 intel_wakeref_t __maybe_unused wf; 883 884 drm_WARN_ON(display->drm, test_bit(domain, power_domain_set->mask.bits)); 885 886 wf = intel_display_power_get(display, domain); 887 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) 888 power_domain_set->wakerefs[domain] = wf; 889 #endif 890 set_bit(domain, power_domain_set->mask.bits); 891 } 892 893 bool 894 intel_display_power_get_in_set_if_enabled(struct intel_display *display, 895 struct intel_display_power_domain_set *power_domain_set, 896 enum intel_display_power_domain domain) 897 { 898 intel_wakeref_t wf; 899 900 drm_WARN_ON(display->drm, test_bit(domain, power_domain_set->mask.bits)); 901 902 wf = intel_display_power_get_if_enabled(display, domain); 903 if (!wf) 904 return false; 905 906 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) 907 power_domain_set->wakerefs[domain] = wf; 908 #endif 909 set_bit(domain, power_domain_set->mask.bits); 910 911 return true; 912 } 913 914 void 915 intel_display_power_put_mask_in_set(struct intel_display *display, 916 struct intel_display_power_domain_set *power_domain_set, 917 struct intel_power_domain_mask *mask) 918 { 919 enum intel_display_power_domain domain; 920 921 drm_WARN_ON(display->drm, 922 !bitmap_subset(mask->bits, power_domain_set->mask.bits, POWER_DOMAIN_NUM)); 923 924 for_each_power_domain(domain, mask) { 925 intel_wakeref_t __maybe_unused wf = INTEL_WAKEREF_DEF; 926 927 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) 928 wf = fetch_and_zero(&power_domain_set->wakerefs[domain]); 929 #endif 930 intel_display_power_put(display, domain, wf); 931 clear_bit(domain, power_domain_set->mask.bits); 932 } 933 } 934 935 static int 936 sanitize_disable_power_well_option(int disable_power_well) 937 { 938 if (disable_power_well >= 0) 939 return !!disable_power_well; 940 941 return 1; 942 } 943 944 static u32 get_allowed_dc_mask(struct intel_display *display, int enable_dc) 945 { 946 u32 mask; 947 int requested_dc; 948 int max_dc; 949 950 if (!HAS_DISPLAY(display)) 951 return 0; 952 953 if (DISPLAY_VER(display) >= 20) 954 max_dc = 2; 955 else if (display->platform.dg2) 956 max_dc = 1; 957 else if (display->platform.dg1) 958 max_dc = 3; 959 else if (DISPLAY_VER(display) >= 12) 960 max_dc = 4; 961 else if (display->platform.geminilake || 
display->platform.broxton) 962 max_dc = 1; 963 else if (DISPLAY_VER(display) >= 9) 964 max_dc = 2; 965 else 966 max_dc = 0; 967 968 /* 969 * DC9 has a separate HW flow from the rest of the DC states, 970 * not depending on the DMC firmware. It's needed by system 971 * suspend/resume, so allow it unconditionally. 972 */ 973 mask = display->platform.geminilake || display->platform.broxton || 974 DISPLAY_VER(display) >= 11 ? DC_STATE_EN_DC9 : 0; 975 976 if (!display->params.disable_power_well) 977 max_dc = 0; 978 979 if (enable_dc >= 0 && enable_dc <= max_dc) { 980 requested_dc = enable_dc; 981 } else if (enable_dc == -1) { 982 requested_dc = max_dc; 983 } else if (enable_dc > max_dc && enable_dc <= 4) { 984 drm_dbg_kms(display->drm, 985 "Adjusting requested max DC state (%d->%d)\n", 986 enable_dc, max_dc); 987 requested_dc = max_dc; 988 } else { 989 drm_err(display->drm, 990 "Unexpected value for enable_dc (%d)\n", enable_dc); 991 requested_dc = max_dc; 992 } 993 994 switch (requested_dc) { 995 case 4: 996 mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6; 997 break; 998 case 3: 999 mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC5; 1000 break; 1001 case 2: 1002 mask |= DC_STATE_EN_UPTO_DC6; 1003 break; 1004 case 1: 1005 mask |= DC_STATE_EN_UPTO_DC5; 1006 break; 1007 } 1008 1009 drm_dbg_kms(display->drm, "Allowed DC state mask %02x\n", mask); 1010 1011 return mask; 1012 } 1013 1014 /** 1015 * intel_power_domains_init - initializes the power domain structures 1016 * @display: display device instance 1017 * 1018 * Initializes the power domain structures for @display depending upon the 1019 * supported platform. 1020 */ 1021 int intel_power_domains_init(struct intel_display *display) 1022 { 1023 struct i915_power_domains *power_domains = &display->power.domains; 1024 1025 display->params.disable_power_well = 1026 sanitize_disable_power_well_option(display->params.disable_power_well); 1027 power_domains->allowed_dc_mask = 1028 get_allowed_dc_mask(display, display->params.enable_dc); 1029 1030 power_domains->target_dc_state = 1031 sanitize_target_dc_state(display, DC_STATE_EN_UPTO_DC6); 1032 1033 mutex_init(&power_domains->lock); 1034 1035 INIT_DELAYED_WORK(&power_domains->async_put_work, 1036 intel_display_power_put_async_work); 1037 1038 return intel_display_power_map_init(power_domains); 1039 } 1040 1041 /** 1042 * intel_power_domains_cleanup - clean up power domains resources 1043 * @display: display device instance 1044 * 1045 * Release any resources acquired by intel_power_domains_init() 1046 */ 1047 void intel_power_domains_cleanup(struct intel_display *display) 1048 { 1049 intel_display_power_map_cleanup(&display->power.domains); 1050 } 1051 1052 static void intel_power_domains_sync_hw(struct intel_display *display) 1053 { 1054 struct i915_power_domains *power_domains = &display->power.domains; 1055 struct i915_power_well *power_well; 1056 1057 mutex_lock(&power_domains->lock); 1058 for_each_power_well(display, power_well) 1059 intel_power_well_sync_hw(display, power_well); 1060 mutex_unlock(&power_domains->lock); 1061 } 1062 1063 static void gen9_dbuf_slice_set(struct intel_display *display, 1064 enum dbuf_slice slice, bool enable) 1065 { 1066 i915_reg_t reg = DBUF_CTL_S(slice); 1067 bool state; 1068 1069 intel_de_rmw(display, reg, DBUF_POWER_REQUEST, 1070 enable ? 
DBUF_POWER_REQUEST : 0); 1071 intel_de_posting_read(display, reg); 1072 udelay(10); 1073 1074 state = intel_de_read(display, reg) & DBUF_POWER_STATE; 1075 drm_WARN(display->drm, enable != state, 1076 "DBuf slice %d power %s timeout!\n", 1077 slice, str_enable_disable(enable)); 1078 } 1079 1080 void gen9_dbuf_slices_update(struct intel_display *display, 1081 u8 req_slices) 1082 { 1083 struct i915_power_domains *power_domains = &display->power.domains; 1084 u8 slice_mask = DISPLAY_INFO(display)->dbuf.slice_mask; 1085 enum dbuf_slice slice; 1086 1087 drm_WARN(display->drm, req_slices & ~slice_mask, 1088 "Invalid set of dbuf slices (0x%x) requested (total dbuf slices 0x%x)\n", 1089 req_slices, slice_mask); 1090 1091 drm_dbg_kms(display->drm, "Updating dbuf slices to 0x%x\n", 1092 req_slices); 1093 1094 /* 1095 * Might be running this in parallel to gen9_dc_off_power_well_enable 1096 * being called from intel_dp_detect for instance, 1097 * which causes assertion triggered by race condition, 1098 * as gen9_assert_dbuf_enabled might preempt this when registers 1099 * were already updated, while dev_priv was not. 1100 */ 1101 mutex_lock(&power_domains->lock); 1102 1103 for_each_dbuf_slice(display, slice) 1104 gen9_dbuf_slice_set(display, slice, req_slices & BIT(slice)); 1105 1106 display->dbuf.enabled_slices = req_slices; 1107 1108 mutex_unlock(&power_domains->lock); 1109 } 1110 1111 static void gen9_dbuf_enable(struct intel_display *display) 1112 { 1113 u8 slices_mask; 1114 1115 display->dbuf.enabled_slices = intel_enabled_dbuf_slices_mask(display); 1116 1117 slices_mask = BIT(DBUF_S1) | display->dbuf.enabled_slices; 1118 1119 if (DISPLAY_VER(display) >= 14) 1120 intel_pmdemand_program_dbuf(display, slices_mask); 1121 1122 /* 1123 * Just power up at least 1 slice, we will 1124 * figure out later which slices we have and what we need. 1125 */ 1126 gen9_dbuf_slices_update(display, slices_mask); 1127 } 1128 1129 static void gen9_dbuf_disable(struct intel_display *display) 1130 { 1131 gen9_dbuf_slices_update(display, 0); 1132 1133 if (DISPLAY_VER(display) >= 14) 1134 intel_pmdemand_program_dbuf(display, 0); 1135 } 1136 1137 static void gen12_dbuf_slices_config(struct intel_display *display) 1138 { 1139 enum dbuf_slice slice; 1140 1141 for_each_dbuf_slice(display, slice) 1142 intel_de_rmw(display, DBUF_CTL_S(slice), 1143 DBUF_TRACKER_STATE_SERVICE_MASK, 1144 DBUF_TRACKER_STATE_SERVICE(8)); 1145 } 1146 1147 static void icl_mbus_init(struct intel_display *display) 1148 { 1149 unsigned long abox_regs = DISPLAY_INFO(display)->abox_mask; 1150 u32 mask, val, i; 1151 1152 if (display->platform.alderlake_p || DISPLAY_VER(display) >= 14) 1153 return; 1154 1155 mask = MBUS_ABOX_BT_CREDIT_POOL1_MASK | 1156 MBUS_ABOX_BT_CREDIT_POOL2_MASK | 1157 MBUS_ABOX_B_CREDIT_MASK | 1158 MBUS_ABOX_BW_CREDIT_MASK; 1159 val = MBUS_ABOX_BT_CREDIT_POOL1(16) | 1160 MBUS_ABOX_BT_CREDIT_POOL2(16) | 1161 MBUS_ABOX_B_CREDIT(1) | 1162 MBUS_ABOX_BW_CREDIT(1); 1163 1164 /* 1165 * gen12 platforms that use abox1 and abox2 for pixel data reads still 1166 * expect us to program the abox_ctl0 register as well, even though 1167 * we don't have to program other instance-0 registers like BW_BUDDY. 
1168 */ 1169 if (DISPLAY_VER(display) == 12) 1170 abox_regs |= BIT(0); 1171 1172 for_each_set_bit(i, &abox_regs, sizeof(abox_regs)) 1173 intel_de_rmw(display, MBUS_ABOX_CTL(i), mask, val); 1174 } 1175 1176 static void hsw_assert_cdclk(struct intel_display *display) 1177 { 1178 u32 val = intel_de_read(display, LCPLL_CTL); 1179 1180 /* 1181 * The LCPLL register should be turned on by the BIOS. For now 1182 * let's just check its state and print errors in case 1183 * something is wrong. Don't even try to turn it on. 1184 */ 1185 1186 if (val & LCPLL_CD_SOURCE_FCLK) 1187 drm_err(display->drm, "CDCLK source is not LCPLL\n"); 1188 1189 if (val & LCPLL_PLL_DISABLE) 1190 drm_err(display->drm, "LCPLL is disabled\n"); 1191 1192 if ((val & LCPLL_REF_MASK) != LCPLL_REF_NON_SSC) 1193 drm_err(display->drm, "LCPLL not using non-SSC reference\n"); 1194 } 1195 1196 static void assert_can_disable_lcpll(struct intel_display *display) 1197 { 1198 struct drm_i915_private *dev_priv = to_i915(display->drm); 1199 struct intel_crtc *crtc; 1200 1201 for_each_intel_crtc(display->drm, crtc) 1202 INTEL_DISPLAY_STATE_WARN(display, crtc->active, 1203 "CRTC for pipe %c enabled\n", 1204 pipe_name(crtc->pipe)); 1205 1206 INTEL_DISPLAY_STATE_WARN(display, intel_de_read(display, HSW_PWR_WELL_CTL2), 1207 "Display power well on\n"); 1208 INTEL_DISPLAY_STATE_WARN(display, 1209 intel_de_read(display, SPLL_CTL) & SPLL_PLL_ENABLE, 1210 "SPLL enabled\n"); 1211 INTEL_DISPLAY_STATE_WARN(display, 1212 intel_de_read(display, WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, 1213 "WRPLL1 enabled\n"); 1214 INTEL_DISPLAY_STATE_WARN(display, 1215 intel_de_read(display, WRPLL_CTL(1)) & WRPLL_PLL_ENABLE, 1216 "WRPLL2 enabled\n"); 1217 INTEL_DISPLAY_STATE_WARN(display, 1218 intel_de_read(display, PP_STATUS(display, 0)) & PP_ON, 1219 "Panel power on\n"); 1220 INTEL_DISPLAY_STATE_WARN(display, 1221 intel_de_read(display, BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE, 1222 "CPU PWM1 enabled\n"); 1223 if (display->platform.haswell) 1224 INTEL_DISPLAY_STATE_WARN(display, 1225 intel_de_read(display, HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE, 1226 "CPU PWM2 enabled\n"); 1227 INTEL_DISPLAY_STATE_WARN(display, 1228 intel_de_read(display, BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE, 1229 "PCH PWM1 enabled\n"); 1230 INTEL_DISPLAY_STATE_WARN(display, 1231 (intel_de_read(display, UTIL_PIN_CTL) & (UTIL_PIN_ENABLE | UTIL_PIN_MODE_MASK)) == (UTIL_PIN_ENABLE | UTIL_PIN_MODE_PWM), 1232 "Utility pin enabled in PWM mode\n"); 1233 INTEL_DISPLAY_STATE_WARN(display, 1234 intel_de_read(display, PCH_GTC_CTL) & PCH_GTC_ENABLE, 1235 "PCH GTC enabled\n"); 1236 1237 /* 1238 * In theory we can still leave IRQs enabled, as long as only the HPD 1239 * interrupts remain enabled. We used to check for that, but since it's 1240 * gen-specific and since we only disable LCPLL after we fully disable 1241 * the interrupts, the check below should be enough. 
1242 */ 1243 INTEL_DISPLAY_STATE_WARN(display, intel_irqs_enabled(dev_priv), 1244 "IRQs enabled\n"); 1245 } 1246 1247 static u32 hsw_read_dcomp(struct intel_display *display) 1248 { 1249 if (display->platform.haswell) 1250 return intel_de_read(display, D_COMP_HSW); 1251 else 1252 return intel_de_read(display, D_COMP_BDW); 1253 } 1254 1255 static void hsw_write_dcomp(struct intel_display *display, u32 val) 1256 { 1257 struct drm_i915_private *dev_priv = to_i915(display->drm); 1258 1259 if (display->platform.haswell) { 1260 if (snb_pcode_write(&dev_priv->uncore, GEN6_PCODE_WRITE_D_COMP, val)) 1261 drm_dbg_kms(display->drm, "Failed to write to D_COMP\n"); 1262 } else { 1263 intel_de_write(display, D_COMP_BDW, val); 1264 intel_de_posting_read(display, D_COMP_BDW); 1265 } 1266 } 1267 1268 /* 1269 * This function implements pieces of two sequences from BSpec: 1270 * - Sequence for display software to disable LCPLL 1271 * - Sequence for display software to allow package C8+ 1272 * The steps implemented here are just the steps that actually touch the LCPLL 1273 * register. Callers should take care of disabling all the display engine 1274 * functions, doing the mode unset, fixing interrupts, etc. 1275 */ 1276 static void hsw_disable_lcpll(struct intel_display *display, 1277 bool switch_to_fclk, bool allow_power_down) 1278 { 1279 u32 val; 1280 1281 assert_can_disable_lcpll(display); 1282 1283 val = intel_de_read(display, LCPLL_CTL); 1284 1285 if (switch_to_fclk) { 1286 val |= LCPLL_CD_SOURCE_FCLK; 1287 intel_de_write(display, LCPLL_CTL, val); 1288 1289 if (wait_for_us(intel_de_read(display, LCPLL_CTL) & 1290 LCPLL_CD_SOURCE_FCLK_DONE, 1)) 1291 drm_err(display->drm, "Switching to FCLK failed\n"); 1292 1293 val = intel_de_read(display, LCPLL_CTL); 1294 } 1295 1296 val |= LCPLL_PLL_DISABLE; 1297 intel_de_write(display, LCPLL_CTL, val); 1298 intel_de_posting_read(display, LCPLL_CTL); 1299 1300 if (intel_de_wait_for_clear(display, LCPLL_CTL, LCPLL_PLL_LOCK, 1)) 1301 drm_err(display->drm, "LCPLL still locked\n"); 1302 1303 val = hsw_read_dcomp(display); 1304 val |= D_COMP_COMP_DISABLE; 1305 hsw_write_dcomp(display, val); 1306 ndelay(100); 1307 1308 if (wait_for((hsw_read_dcomp(display) & 1309 D_COMP_RCOMP_IN_PROGRESS) == 0, 1)) 1310 drm_err(display->drm, "D_COMP RCOMP still in progress\n"); 1311 1312 if (allow_power_down) { 1313 intel_de_rmw(display, LCPLL_CTL, 0, LCPLL_POWER_DOWN_ALLOW); 1314 intel_de_posting_read(display, LCPLL_CTL); 1315 } 1316 } 1317 1318 /* 1319 * Fully restores LCPLL, disallowing power down and switching back to LCPLL 1320 * source. 1321 */ 1322 static void hsw_restore_lcpll(struct intel_display *display) 1323 { 1324 struct drm_i915_private __maybe_unused *dev_priv = to_i915(display->drm); 1325 u32 val; 1326 1327 val = intel_de_read(display, LCPLL_CTL); 1328 1329 if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK | 1330 LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK) 1331 return; 1332 1333 /* 1334 * Make sure we're not on PC8 state before disabling PC8, otherwise 1335 * we'll hang the machine. To prevent PC8 state, just enable force_wake. 
1336 */ 1337 intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); 1338 1339 if (val & LCPLL_POWER_DOWN_ALLOW) { 1340 val &= ~LCPLL_POWER_DOWN_ALLOW; 1341 intel_de_write(display, LCPLL_CTL, val); 1342 intel_de_posting_read(display, LCPLL_CTL); 1343 } 1344 1345 val = hsw_read_dcomp(display); 1346 val |= D_COMP_COMP_FORCE; 1347 val &= ~D_COMP_COMP_DISABLE; 1348 hsw_write_dcomp(display, val); 1349 1350 val = intel_de_read(display, LCPLL_CTL); 1351 val &= ~LCPLL_PLL_DISABLE; 1352 intel_de_write(display, LCPLL_CTL, val); 1353 1354 if (intel_de_wait_for_set(display, LCPLL_CTL, LCPLL_PLL_LOCK, 5)) 1355 drm_err(display->drm, "LCPLL not locked yet\n"); 1356 1357 if (val & LCPLL_CD_SOURCE_FCLK) { 1358 intel_de_rmw(display, LCPLL_CTL, LCPLL_CD_SOURCE_FCLK, 0); 1359 1360 if (wait_for_us((intel_de_read(display, LCPLL_CTL) & 1361 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1)) 1362 drm_err(display->drm, 1363 "Switching back to LCPLL failed\n"); 1364 } 1365 1366 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); 1367 1368 intel_update_cdclk(display); 1369 intel_cdclk_dump_config(display, &display->cdclk.hw, "Current CDCLK"); 1370 } 1371 1372 /* 1373 * Package states C8 and deeper are really deep PC states that can only be 1374 * reached when all the devices on the system allow it, so even if the graphics 1375 * device allows PC8+, it doesn't mean the system will actually get to these 1376 * states. Our driver only allows PC8+ when going into runtime PM. 1377 * 1378 * The requirements for PC8+ are that all the outputs are disabled, the power 1379 * well is disabled and most interrupts are disabled, and these are also 1380 * requirements for runtime PM. When these conditions are met, we manually do 1381 * the other conditions: disable the interrupts, clocks and switch LCPLL refclk 1382 * to Fclk. If we're in PC8+ and we get a non-hotplug interrupt, we can hard 1383 * hang the machine. 1384 * 1385 * When we really reach PC8 or deeper states (not just when we allow it) we lose 1386 * the state of some registers, so when we come back from PC8+ we need to 1387 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't 1388 * need to take care of the registers kept by RC6. Notice that this happens even 1389 * if we don't put the device in PCI D3 state (which is what currently happens 1390 * because of the runtime PM support). 1391 * 1392 * For more, read "Display Sequences for Package C8" in the hardware 1393 * documentation.
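 *
 * In practice this means the runtime PM paths bracket the sequences below
 * roughly as follows (illustrative sketch, not the exact call chain):
 *
 *	hsw_enable_pc8(display);	- runtime suspend, allow PC8+
 *	...				- device runtime suspended
 *	hsw_disable_pc8(display);	- runtime resume, restore LCPLL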
1394 */ 1395 static void hsw_enable_pc8(struct intel_display *display) 1396 { 1397 drm_dbg_kms(display->drm, "Enabling package C8+\n"); 1398 1399 if (HAS_PCH_LPT_LP(display)) 1400 intel_de_rmw(display, SOUTH_DSPCLK_GATE_D, 1401 PCH_LP_PARTITION_LEVEL_DISABLE, 0); 1402 1403 lpt_disable_clkout_dp(display); 1404 hsw_disable_lcpll(display, true, true); 1405 } 1406 1407 static void hsw_disable_pc8(struct intel_display *display) 1408 { 1409 struct drm_i915_private __maybe_unused *dev_priv = to_i915(display->drm); 1410 1411 drm_dbg_kms(display->drm, "Disabling package C8+\n"); 1412 1413 hsw_restore_lcpll(display); 1414 intel_init_pch_refclk(display); 1415 1416 /* Many display registers don't survive PC8+ */ 1417 #ifdef I915 /* FIXME */ 1418 intel_clock_gating_init(dev_priv); 1419 #endif 1420 } 1421 1422 static void intel_pch_reset_handshake(struct intel_display *display, 1423 bool enable) 1424 { 1425 i915_reg_t reg; 1426 u32 reset_bits; 1427 1428 if (display->platform.ivybridge) { 1429 reg = GEN7_MSG_CTL; 1430 reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK; 1431 } else { 1432 reg = HSW_NDE_RSTWRN_OPT; 1433 reset_bits = RESET_PCH_HANDSHAKE_ENABLE; 1434 } 1435 1436 if (DISPLAY_VER(display) >= 14) 1437 reset_bits |= MTL_RESET_PICA_HANDSHAKE_EN; 1438 1439 intel_de_rmw(display, reg, reset_bits, enable ? reset_bits : 0); 1440 } 1441 1442 static void skl_display_core_init(struct intel_display *display, 1443 bool resume) 1444 { 1445 struct i915_power_domains *power_domains = &display->power.domains; 1446 struct i915_power_well *well; 1447 1448 gen9_set_dc_state(display, DC_STATE_DISABLE); 1449 1450 /* enable PCH reset handshake */ 1451 intel_pch_reset_handshake(display, !HAS_PCH_NOP(display)); 1452 1453 if (!HAS_DISPLAY(display)) 1454 return; 1455 1456 /* enable PG1 and Misc I/O */ 1457 mutex_lock(&power_domains->lock); 1458 1459 well = lookup_power_well(display, SKL_DISP_PW_1); 1460 intel_power_well_enable(display, well); 1461 1462 well = lookup_power_well(display, SKL_DISP_PW_MISC_IO); 1463 intel_power_well_enable(display, well); 1464 1465 mutex_unlock(&power_domains->lock); 1466 1467 intel_cdclk_init_hw(display); 1468 1469 gen9_dbuf_enable(display); 1470 1471 if (resume) 1472 intel_dmc_load_program(display); 1473 } 1474 1475 static void skl_display_core_uninit(struct intel_display *display) 1476 { 1477 struct i915_power_domains *power_domains = &display->power.domains; 1478 struct i915_power_well *well; 1479 1480 if (!HAS_DISPLAY(display)) 1481 return; 1482 1483 gen9_disable_dc_states(display); 1484 /* TODO: disable DMC program */ 1485 1486 gen9_dbuf_disable(display); 1487 1488 intel_cdclk_uninit_hw(display); 1489 1490 /* The spec doesn't call for removing the reset handshake flag */ 1491 /* disable PG1 and Misc I/O */ 1492 1493 mutex_lock(&power_domains->lock); 1494 1495 /* 1496 * BSpec says to keep the MISC IO power well enabled here, only 1497 * remove our request for power well 1. 1498 * Note that even though the driver's request is removed power well 1 1499 * may stay enabled after this due to DMC's own request on it. 
1500 */ 1501 well = lookup_power_well(display, SKL_DISP_PW_1); 1502 intel_power_well_disable(display, well); 1503 1504 mutex_unlock(&power_domains->lock); 1505 1506 usleep_range(10, 30); /* 10 us delay per Bspec */ 1507 } 1508 1509 static void bxt_display_core_init(struct intel_display *display, bool resume) 1510 { 1511 struct i915_power_domains *power_domains = &display->power.domains; 1512 struct i915_power_well *well; 1513 1514 gen9_set_dc_state(display, DC_STATE_DISABLE); 1515 1516 /* 1517 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT 1518 * or else the reset will hang because there is no PCH to respond. 1519 * Move the handshake programming to initialization sequence. 1520 * Previously was left up to BIOS. 1521 */ 1522 intel_pch_reset_handshake(display, false); 1523 1524 if (!HAS_DISPLAY(display)) 1525 return; 1526 1527 /* Enable PG1 */ 1528 mutex_lock(&power_domains->lock); 1529 1530 well = lookup_power_well(display, SKL_DISP_PW_1); 1531 intel_power_well_enable(display, well); 1532 1533 mutex_unlock(&power_domains->lock); 1534 1535 intel_cdclk_init_hw(display); 1536 1537 gen9_dbuf_enable(display); 1538 1539 if (resume) 1540 intel_dmc_load_program(display); 1541 } 1542 1543 static void bxt_display_core_uninit(struct intel_display *display) 1544 { 1545 struct i915_power_domains *power_domains = &display->power.domains; 1546 struct i915_power_well *well; 1547 1548 if (!HAS_DISPLAY(display)) 1549 return; 1550 1551 gen9_disable_dc_states(display); 1552 /* TODO: disable DMC program */ 1553 1554 gen9_dbuf_disable(display); 1555 1556 intel_cdclk_uninit_hw(display); 1557 1558 /* The spec doesn't call for removing the reset handshake flag */ 1559 1560 /* 1561 * Disable PW1 (PG1). 1562 * Note that even though the driver's request is removed power well 1 1563 * may stay enabled after this due to DMC's own request on it. 
1564 */ 1565 mutex_lock(&power_domains->lock); 1566 1567 well = lookup_power_well(display, SKL_DISP_PW_1); 1568 intel_power_well_disable(display, well); 1569 1570 mutex_unlock(&power_domains->lock); 1571 1572 usleep_range(10, 30); /* 10 us delay per Bspec */ 1573 } 1574 1575 struct buddy_page_mask { 1576 u32 page_mask; 1577 u8 type; 1578 u8 num_channels; 1579 }; 1580 1581 static const struct buddy_page_mask tgl_buddy_page_masks[] = { 1582 { .num_channels = 1, .type = INTEL_DRAM_DDR4, .page_mask = 0xF }, 1583 { .num_channels = 1, .type = INTEL_DRAM_DDR5, .page_mask = 0xF }, 1584 { .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1C }, 1585 { .num_channels = 2, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x1C }, 1586 { .num_channels = 2, .type = INTEL_DRAM_DDR4, .page_mask = 0x1F }, 1587 { .num_channels = 2, .type = INTEL_DRAM_DDR5, .page_mask = 0x1E }, 1588 { .num_channels = 4, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x38 }, 1589 { .num_channels = 4, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x38 }, 1590 {} 1591 }; 1592 1593 static const struct buddy_page_mask wa_1409767108_buddy_page_masks[] = { 1594 { .num_channels = 1, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1 }, 1595 { .num_channels = 1, .type = INTEL_DRAM_DDR4, .page_mask = 0x1 }, 1596 { .num_channels = 1, .type = INTEL_DRAM_DDR5, .page_mask = 0x1 }, 1597 { .num_channels = 1, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x1 }, 1598 { .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x3 }, 1599 { .num_channels = 2, .type = INTEL_DRAM_DDR4, .page_mask = 0x3 }, 1600 { .num_channels = 2, .type = INTEL_DRAM_DDR5, .page_mask = 0x3 }, 1601 { .num_channels = 2, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x3 }, 1602 {} 1603 }; 1604 1605 static void tgl_bw_buddy_init(struct intel_display *display) 1606 { 1607 struct drm_i915_private *dev_priv = to_i915(display->drm); 1608 enum intel_dram_type type = dev_priv->dram_info.type; 1609 u8 num_channels = dev_priv->dram_info.num_channels; 1610 const struct buddy_page_mask *table; 1611 unsigned long abox_mask = DISPLAY_INFO(display)->abox_mask; 1612 int config, i; 1613 1614 /* BW_BUDDY registers are not used on dgpu's beyond DG1 */ 1615 if (display->platform.dgfx && !display->platform.dg1) 1616 return; 1617 1618 if (display->platform.alderlake_s || 1619 (display->platform.rocketlake && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0))) 1620 /* Wa_1409767108 */ 1621 table = wa_1409767108_buddy_page_masks; 1622 else 1623 table = tgl_buddy_page_masks; 1624 1625 for (config = 0; table[config].page_mask != 0; config++) 1626 if (table[config].num_channels == num_channels && 1627 table[config].type == type) 1628 break; 1629 1630 if (table[config].page_mask == 0) { 1631 drm_dbg_kms(display->drm, 1632 "Unknown memory configuration; disabling address buddy logic.\n"); 1633 for_each_set_bit(i, &abox_mask, sizeof(abox_mask)) 1634 intel_de_write(display, BW_BUDDY_CTL(i), 1635 BW_BUDDY_DISABLE); 1636 } else { 1637 for_each_set_bit(i, &abox_mask, sizeof(abox_mask)) { 1638 intel_de_write(display, BW_BUDDY_PAGE_MASK(i), 1639 table[config].page_mask); 1640 1641 /* Wa_22010178259:tgl,dg1,rkl,adl-s */ 1642 if (DISPLAY_VER(display) == 12) 1643 intel_de_rmw(display, BW_BUDDY_CTL(i), 1644 BW_BUDDY_TLB_REQ_TIMER_MASK, 1645 BW_BUDDY_TLB_REQ_TIMER(0x8)); 1646 } 1647 } 1648 } 1649 1650 static void icl_display_core_init(struct intel_display *display, 1651 bool resume) 1652 { 1653 struct i915_power_domains *power_domains = &display->power.domains; 1654 struct i915_power_well *well; 1655 1656 gen9_set_dc_state(display, 
DC_STATE_DISABLE); 1657 1658 /* Wa_14011294188:ehl,jsl,tgl,rkl,adl-s */ 1659 if (INTEL_PCH_TYPE(display) >= PCH_TGP && 1660 INTEL_PCH_TYPE(display) < PCH_DG1) 1661 intel_de_rmw(display, SOUTH_DSPCLK_GATE_D, 0, 1662 PCH_DPMGUNIT_CLOCK_GATE_DISABLE); 1663 1664 /* 1. Enable PCH reset handshake. */ 1665 intel_pch_reset_handshake(display, !HAS_PCH_NOP(display)); 1666 1667 if (!HAS_DISPLAY(display)) 1668 return; 1669 1670 /* 2. Initialize all combo phys */ 1671 intel_combo_phy_init(display); 1672 1673 /* 1674 * 3. Enable Power Well 1 (PG1). 1675 * The AUX IO power wells will be enabled on demand. 1676 */ 1677 mutex_lock(&power_domains->lock); 1678 well = lookup_power_well(display, SKL_DISP_PW_1); 1679 intel_power_well_enable(display, well); 1680 mutex_unlock(&power_domains->lock); 1681 1682 if (DISPLAY_VER(display) == 14) 1683 intel_de_rmw(display, DC_STATE_EN, 1684 HOLD_PHY_PG1_LATCH | HOLD_PHY_CLKREQ_PG1_LATCH, 0); 1685 1686 /* 4. Enable CDCLK. */ 1687 intel_cdclk_init_hw(display); 1688 1689 if (DISPLAY_VER(display) == 12 || display->platform.dg2) 1690 gen12_dbuf_slices_config(display); 1691 1692 /* 5. Enable DBUF. */ 1693 gen9_dbuf_enable(display); 1694 1695 /* 6. Setup MBUS. */ 1696 icl_mbus_init(display); 1697 1698 /* 7. Program arbiter BW_BUDDY registers */ 1699 if (DISPLAY_VER(display) >= 12) 1700 tgl_bw_buddy_init(display); 1701 1702 /* 8. Ensure PHYs have completed calibration and adaptation */ 1703 if (display->platform.dg2) 1704 intel_snps_phy_wait_for_calibration(display); 1705 1706 /* 9. XE2_HPD: Program CHICKEN_MISC_2 before any cursor or planes are enabled */ 1707 if (DISPLAY_VERx100(display) == 1401) 1708 intel_de_rmw(display, CHICKEN_MISC_2, BMG_DARB_HALF_BLK_END_BURST, 1); 1709 1710 if (resume) 1711 intel_dmc_load_program(display); 1712 1713 /* Wa_14011508470:tgl,dg1,rkl,adl-s,adl-p,dg2 */ 1714 if (IS_DISPLAY_VERx100(display, 1200, 1300)) 1715 intel_de_rmw(display, GEN11_CHICKEN_DCPR_2, 0, 1716 DCPR_CLEAR_MEMSTAT_DIS | DCPR_SEND_RESP_IMM | 1717 DCPR_MASK_LPMODE | DCPR_MASK_MAXLATENCY_MEMUP_CLR); 1718 1719 /* Wa_14011503030:xelpd */ 1720 if (DISPLAY_VER(display) == 13) 1721 intel_de_write(display, XELPD_DISPLAY_ERR_FATAL_MASK, ~0); 1722 1723 /* Wa_15013987218 */ 1724 if (DISPLAY_VER(display) == 20) { 1725 intel_de_rmw(display, SOUTH_DSPCLK_GATE_D, 1726 0, PCH_GMBUSUNIT_CLOCK_GATE_DISABLE); 1727 intel_de_rmw(display, SOUTH_DSPCLK_GATE_D, 1728 PCH_GMBUSUNIT_CLOCK_GATE_DISABLE, 0); 1729 } 1730 } 1731 1732 static void icl_display_core_uninit(struct intel_display *display) 1733 { 1734 struct i915_power_domains *power_domains = &display->power.domains; 1735 struct i915_power_well *well; 1736 1737 if (!HAS_DISPLAY(display)) 1738 return; 1739 1740 gen9_disable_dc_states(display); 1741 intel_dmc_disable_program(display); 1742 1743 /* 1. Disable all display engine functions -> already done */ 1744 1745 /* 2. Disable DBUF */ 1746 gen9_dbuf_disable(display); 1747 1748 /* 3. Disable CD clock */ 1749 intel_cdclk_uninit_hw(display); 1750 1751 if (DISPLAY_VER(display) == 14) 1752 intel_de_rmw(display, DC_STATE_EN, 0, 1753 HOLD_PHY_PG1_LATCH | HOLD_PHY_CLKREQ_PG1_LATCH); 1754 1755 /* 1756 * 4. Disable Power Well 1 (PG1). 1757 * The AUX IO power wells are toggled on demand, so they are already 1758 * disabled at this point. 1759 */ 1760 mutex_lock(&power_domains->lock); 1761 well = lookup_power_well(display, SKL_DISP_PW_1); 1762 intel_power_well_disable(display, well); 1763 mutex_unlock(&power_domains->lock); 1764 1765 /* 5. 
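Uninitialize all combo phys.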

static void icl_display_core_uninit(struct intel_display *display)
{
	struct i915_power_domains *power_domains = &display->power.domains;
	struct i915_power_well *well;

	if (!HAS_DISPLAY(display))
		return;

	gen9_disable_dc_states(display);
	intel_dmc_disable_program(display);

	/* 1. Disable all display engine functions -> already done */

	/* 2. Disable DBUF */
	gen9_dbuf_disable(display);

	/* 3. Disable CD clock */
	intel_cdclk_uninit_hw(display);

	if (DISPLAY_VER(display) == 14)
		intel_de_rmw(display, DC_STATE_EN, 0,
			     HOLD_PHY_PG1_LATCH | HOLD_PHY_CLKREQ_PG1_LATCH);

	/*
	 * 4. Disable Power Well 1 (PG1).
	 * The AUX IO power wells are toggled on demand, so they are already
	 * disabled at this point.
	 */
	mutex_lock(&power_domains->lock);
	well = lookup_power_well(display, SKL_DISP_PW_1);
	intel_power_well_disable(display, well);
	mutex_unlock(&power_domains->lock);

	/* 5. De-initialize all combo phys */
	intel_combo_phy_uninit(display);
}

static void chv_phy_control_init(struct intel_display *display)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(display, VLV_DISP_PW_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(display, CHV_DISP_PW_DPIO_CMN_D);

	/*
	 * DISPLAY_PHY_CONTROL can get corrupted if read. As a
	 * workaround never ever read DISPLAY_PHY_CONTROL, and
	 * instead maintain a shadow copy ourselves. Use the actual
	 * power well state and lane status to reconstruct the
	 * expected initial value.
	 */
	display->power.chv_phy_control =
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);

	/*
	 * If all lanes are disabled we leave the override disabled
	 * with all power down bits cleared to match the state we
	 * would use after disabling the port. Otherwise enable the
	 * override and set the lane powerdown bits according to the
	 * current lane status.
	 */
	if (intel_power_well_is_enabled(display, cmn_bc)) {
		u32 status = intel_de_read(display, DPLL(display, PIPE_A));
		unsigned int mask;

		mask = status & DPLL_PORTB_READY_MASK;
		if (mask == 0xf)
			mask = 0x0;
		else
			display->power.chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);

		display->power.chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);

		mask = (status & DPLL_PORTC_READY_MASK) >> 4;
		if (mask == 0xf)
			mask = 0x0;
		else
			display->power.chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);

		display->power.chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);

		display->power.chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);

		display->power.chv_phy_assert[DPIO_PHY0] = false;
	} else {
		display->power.chv_phy_assert[DPIO_PHY0] = true;
	}

	if (intel_power_well_is_enabled(display, cmn_d)) {
		u32 status = intel_de_read(display, DPIO_PHY_STATUS);
		unsigned int mask;

		mask = status & DPLL_PORTD_READY_MASK;

		if (mask == 0xf)
			mask = 0x0;
		else
			display->power.chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);

		display->power.chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);

		display->power.chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);

		display->power.chv_phy_assert[DPIO_PHY1] = false;
	} else {
		display->power.chv_phy_assert[DPIO_PHY1] = true;
	}

	drm_dbg_kms(display->drm, "Initial PHY_CONTROL=0x%08x\n",
		    display->power.chv_phy_control);

	/* Defer application of initial phy_control to enabling the powerwell */
}

static void vlv_cmnlane_wa(struct intel_display *display)
{
	struct i915_power_well *cmn =
		lookup_power_well(display, VLV_DISP_PW_DPIO_CMN_BC);
	struct i915_power_well *disp2d =
		lookup_power_well(display, VLV_DISP_PW_DISP2D);

	/* If the display might be already active skip this */
	if (intel_power_well_is_enabled(display, cmn) &&
	    intel_power_well_is_enabled(display, disp2d) &&
	    intel_de_read(display, DPIO_CTL) & DPIO_CMNRST)
		return;

	drm_dbg_kms(display->drm, "toggling display PHY side reset\n");

	/* cmnlane needs DPLL registers */
	intel_power_well_enable(display, disp2d);

	/*
	 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
	 * Need to assert and de-assert PHY SB reset by gating the
	 * common lane power, then un-gating it.
	 * Simply ungating isn't enough to reset the PHY enough to get
	 * ports and lanes running.
	 */
	intel_power_well_disable(display, cmn);
}

static bool vlv_punit_is_power_gated(struct intel_display *display, u32 reg0)
{
	struct drm_i915_private *dev_priv = to_i915(display->drm);
	bool ret;

	vlv_punit_get(dev_priv);
	ret = (vlv_punit_read(dev_priv, reg0) & SSPM0_SSC_MASK) == SSPM0_SSC_PWR_GATE;
	vlv_punit_put(dev_priv);

	return ret;
}

static void assert_ved_power_gated(struct intel_display *display)
{
	drm_WARN(display->drm,
		 !vlv_punit_is_power_gated(display, PUNIT_REG_VEDSSPM0),
		 "VED not power gated\n");
}

static void assert_isp_power_gated(struct intel_display *display)
{
	static const struct pci_device_id isp_ids[] = {
		{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0f38)},
		{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x22b8)},
		{}
	};

	drm_WARN(display->drm, !pci_dev_present(isp_ids) &&
		 !vlv_punit_is_power_gated(display, PUNIT_REG_ISPSSPM0),
		 "ISP not power gated\n");
}
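
/*
 * The PCI IDs checked in assert_isp_power_gated() above are believed to be
 * the ValleyView/CherryView ISP devices; when the ISP is exposed as a PCI
 * device it is presumably owned and powered by its own driver, so the punit
 * power-gating warning is only raised when no such device is present.
 */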

static void intel_power_domains_verify_state(struct intel_display *display);

/**
 * intel_power_domains_init_hw - initialize hardware power domain state
 * @display: display device instance
 * @resume: Called from resume code paths or not
 *
 * This function initializes the hardware power domain state and enables all
 * power wells belonging to the INIT power domain. Power wells in domains
 * other than the INIT domain are referenced or disabled by
 * intel_modeset_readout_hw_state(). After that the reference count of each
 * power well must match its HW enabled state, see
 * intel_power_domains_verify_state().
 *
 * It will return with power domains disabled (to be enabled later by
 * intel_power_domains_enable()) and must be paired with
 * intel_power_domains_driver_remove().
 */
void intel_power_domains_init_hw(struct intel_display *display, bool resume)
{
	struct i915_power_domains *power_domains = &display->power.domains;

	power_domains->initializing = true;

	if (DISPLAY_VER(display) >= 11) {
		icl_display_core_init(display, resume);
	} else if (display->platform.geminilake || display->platform.broxton) {
		bxt_display_core_init(display, resume);
	} else if (DISPLAY_VER(display) == 9) {
		skl_display_core_init(display, resume);
	} else if (display->platform.cherryview) {
		mutex_lock(&power_domains->lock);
		chv_phy_control_init(display);
		mutex_unlock(&power_domains->lock);
		assert_isp_power_gated(display);
	} else if (display->platform.valleyview) {
		mutex_lock(&power_domains->lock);
		vlv_cmnlane_wa(display);
		mutex_unlock(&power_domains->lock);
		assert_ved_power_gated(display);
		assert_isp_power_gated(display);
	} else if (display->platform.broadwell || display->platform.haswell) {
		hsw_assert_cdclk(display);
		intel_pch_reset_handshake(display, !HAS_PCH_NOP(display));
	} else if (display->platform.ivybridge) {
		intel_pch_reset_handshake(display, !HAS_PCH_NOP(display));
	}

	/*
	 * Keep all power wells enabled for any dependent HW access during
	 * initialization and to make sure we keep BIOS enabled display HW
	 * resources powered until display HW readout is complete. We drop
	 * this reference in intel_power_domains_enable().
	 */
	drm_WARN_ON(display->drm, power_domains->init_wakeref);
	power_domains->init_wakeref =
		intel_display_power_get(display, POWER_DOMAIN_INIT);

	/* Disable power well support (keep the wells enabled) if the user asked so. */
	if (!display->params.disable_power_well) {
		drm_WARN_ON(display->drm, power_domains->disable_wakeref);
		display->power.domains.disable_wakeref = intel_display_power_get(display,
										 POWER_DOMAIN_INIT);
	}
	intel_power_domains_sync_hw(display);

	power_domains->initializing = false;
}

/**
 * intel_power_domains_driver_remove - deinitialize hw power domain state
 * @display: display device instance
 *
 * De-initializes the display power domain HW state. It also ensures that the
 * device stays powered up so that the driver can be reloaded.
 *
 * It must be called with power domains already disabled (after a call to
 * intel_power_domains_disable()) and must be paired with
 * intel_power_domains_init_hw().
 */
void intel_power_domains_driver_remove(struct intel_display *display)
{
	intel_wakeref_t wakeref __maybe_unused =
		fetch_and_zero(&display->power.domains.init_wakeref);

	/* Remove the refcount we took to keep power well support disabled. */
	if (!display->params.disable_power_well)
		intel_display_power_put(display, POWER_DOMAIN_INIT,
					fetch_and_zero(&display->power.domains.disable_wakeref));

	intel_display_power_flush_work_sync(display);

	intel_power_domains_verify_state(display);

	/* Keep the power well enabled, but cancel its rpm wakeref. */
	intel_display_rpm_put(display, wakeref);
}
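
/*
 * Expected pairing of the entry points in this file, as described by their
 * kernel-doc (illustrative summary, not an exhaustive list of call sites):
 *
 *   driver load:    intel_power_domains_init_hw() ... intel_power_domains_enable()
 *   driver unload:  intel_power_domains_disable() ... intel_power_domains_driver_remove()
 *   system suspend: intel_power_domains_disable() ... intel_power_domains_suspend()
 *   system resume:  intel_power_domains_resume()  ... intel_power_domains_enable()
 */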

/**
 * intel_power_domains_sanitize_state - sanitize power domains state
 * @display: display device instance
 *
 * Sanitize the power domains state during driver loading and system resume.
 * The function will disable all display power wells that BIOS has enabled
 * without a user for them (any user for a power well has taken a reference
 * on it by the time this function is called, after the state of all the
 * pipe, encoder, etc. HW resources have been sanitized).
 */
void intel_power_domains_sanitize_state(struct intel_display *display)
{
	struct i915_power_domains *power_domains = &display->power.domains;
	struct i915_power_well *power_well;

	mutex_lock(&power_domains->lock);

	for_each_power_well_reverse(display, power_well) {
		if (power_well->desc->always_on || power_well->count ||
		    !intel_power_well_is_enabled(display, power_well))
			continue;

		drm_dbg_kms(display->drm,
			    "BIOS left unused %s power well enabled, disabling it\n",
			    intel_power_well_name(power_well));
		intel_power_well_disable(display, power_well);
	}

	mutex_unlock(&power_domains->lock);
}

/**
 * intel_power_domains_enable - enable toggling of display power wells
 * @display: display device instance
 *
 * Enable the on-demand enabling/disabling of the display power wells. Note
 * that power wells not belonging to POWER_DOMAIN_INIT are allowed to be
 * toggled only at specific points of the display modeset sequence, thus they
 * are not affected by the intel_power_domains_enable()/disable() calls. The
 * purpose of these functions is to keep the rest of the power wells enabled
 * until the end of display HW readout (which will acquire the power
 * references reflecting the current HW state).
 */
void intel_power_domains_enable(struct intel_display *display)
{
	intel_wakeref_t wakeref __maybe_unused =
		fetch_and_zero(&display->power.domains.init_wakeref);

	intel_display_power_put(display, POWER_DOMAIN_INIT, wakeref);
	intel_power_domains_verify_state(display);
}

/**
 * intel_power_domains_disable - disable toggling of display power wells
 * @display: display device instance
 *
 * Disable the on-demand enabling/disabling of the display power wells. See
 * intel_power_domains_enable() for which power wells this call controls.
 */
void intel_power_domains_disable(struct intel_display *display)
{
	struct i915_power_domains *power_domains = &display->power.domains;

	drm_WARN_ON(display->drm, power_domains->init_wakeref);
	power_domains->init_wakeref =
		intel_display_power_get(display, POWER_DOMAIN_INIT);

	intel_power_domains_verify_state(display);
}

/**
 * intel_power_domains_suspend - suspend power domain state
 * @display: display device instance
 * @s2idle: specifies whether we go to idle, or deeper sleep
 *
 * This function prepares the hardware power domain state before entering
 * system suspend.
 *
 * It must be called with power domains already disabled (after a call to
 * intel_power_domains_disable()) and paired with intel_power_domains_resume().
 */
void intel_power_domains_suspend(struct intel_display *display, bool s2idle)
{
	struct i915_power_domains *power_domains = &display->power.domains;
	intel_wakeref_t wakeref __maybe_unused =
		fetch_and_zero(&power_domains->init_wakeref);

	intel_display_power_put(display, POWER_DOMAIN_INIT, wakeref);

	/*
	 * In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9
	 * support, don't manually deinit the power domains. This also means
	 * the DMC firmware will stay active, it will power down any HW
	 * resources as required and also enable deeper system power states
	 * that would be blocked if the firmware was inactive.
	 */
	if (!(power_domains->allowed_dc_mask & DC_STATE_EN_DC9) && s2idle &&
	    intel_dmc_has_payload(display)) {
		intel_display_power_flush_work(display);
		intel_power_domains_verify_state(display);
		return;
	}

	/*
	 * Even if power well support was disabled we still want to disable
	 * power wells if power domains must be deinitialized for suspend.
	 */
	if (!display->params.disable_power_well)
		intel_display_power_put(display, POWER_DOMAIN_INIT,
					fetch_and_zero(&display->power.domains.disable_wakeref));

	intel_display_power_flush_work(display);
	intel_power_domains_verify_state(display);

	if (DISPLAY_VER(display) >= 11)
		icl_display_core_uninit(display);
	else if (display->platform.geminilake || display->platform.broxton)
		bxt_display_core_uninit(display);
	else if (DISPLAY_VER(display) == 9)
		skl_display_core_uninit(display);

	power_domains->display_core_suspended = true;
}
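
/*
 * Note that intel_power_domains_suspend() records whether the display core
 * was actually torn down in power_domains->display_core_suspended (the
 * s2idle-with-DMC path above returns early and leaves it false), so that
 * intel_power_domains_resume() below knows whether to re-run the full
 * hardware init or merely re-take the INIT wakeref.
 */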

/**
 * intel_power_domains_resume - resume power domain state
 * @display: display device instance
 *
 * This function resumes the hardware power domain state during system resume.
 *
 * It will return with power domain support disabled (to be enabled later by
 * intel_power_domains_enable()) and must be paired with
 * intel_power_domains_suspend().
 */
void intel_power_domains_resume(struct intel_display *display)
{
	struct i915_power_domains *power_domains = &display->power.domains;

	if (power_domains->display_core_suspended) {
		intel_power_domains_init_hw(display, true);
		power_domains->display_core_suspended = false;
	} else {
		drm_WARN_ON(display->drm, power_domains->init_wakeref);
		power_domains->init_wakeref =
			intel_display_power_get(display, POWER_DOMAIN_INIT);
	}

	intel_power_domains_verify_state(display);
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)

static void intel_power_domains_dump_info(struct intel_display *display)
{
	struct i915_power_domains *power_domains = &display->power.domains;
	struct i915_power_well *power_well;

	for_each_power_well(display, power_well) {
		enum intel_display_power_domain domain;

		drm_dbg_kms(display->drm, "%-25s %d\n",
			    intel_power_well_name(power_well), intel_power_well_refcount(power_well));

		for_each_power_domain(domain, intel_power_well_domains(power_well))
			drm_dbg_kms(display->drm, " %-23s %d\n",
				    intel_display_power_domain_str(domain),
				    power_domains->domain_use_count[domain]);
	}
}

/**
 * intel_power_domains_verify_state - verify the HW/SW state for all power wells
 * @display: display device instance
 *
 * Verify if the reference count of each power well matches its HW enabled
 * state and the total refcount of the domains it belongs to. This must be
 * called after modeset HW state sanitization, which is responsible for
 * acquiring reference counts for any power wells in use and disabling the
 * ones left on by BIOS but not required by any active output.
 */
static void intel_power_domains_verify_state(struct intel_display *display)
{
	struct i915_power_domains *power_domains = &display->power.domains;
	struct i915_power_well *power_well;
	bool dump_domain_info;

	mutex_lock(&power_domains->lock);

	verify_async_put_domains_state(power_domains);

	dump_domain_info = false;
	for_each_power_well(display, power_well) {
		enum intel_display_power_domain domain;
		int domains_count;
		bool enabled;

		enabled = intel_power_well_is_enabled(display, power_well);
		if ((intel_power_well_refcount(power_well) ||
		     intel_power_well_is_always_on(power_well)) !=
		    enabled)
			drm_err(display->drm,
				"power well %s state mismatch (refcount %d/enabled %d)",
				intel_power_well_name(power_well),
				intel_power_well_refcount(power_well), enabled);

		domains_count = 0;
		for_each_power_domain(domain, intel_power_well_domains(power_well))
			domains_count += power_domains->domain_use_count[domain];

		if (intel_power_well_refcount(power_well) != domains_count) {
			drm_err(display->drm,
				"power well %s refcount/domain refcount mismatch "
				"(refcount %d/domains refcount %d)\n",
				intel_power_well_name(power_well),
				intel_power_well_refcount(power_well),
				domains_count);
			dump_domain_info = true;
		}
	}

	if (dump_domain_info) {
		static bool dumped;

		if (!dumped) {
			intel_power_domains_dump_info(display);
			dumped = true;
		}
	}

	mutex_unlock(&power_domains->lock);
}

#else

static void intel_power_domains_verify_state(struct intel_display *display)
{
}

#endif

void intel_display_power_suspend_late(struct intel_display *display, bool s2idle)
{
	intel_power_domains_suspend(display, s2idle);

	if (DISPLAY_VER(display) >= 11 || display->platform.geminilake ||
	    display->platform.broxton) {
		bxt_enable_dc9(display);
	} else if (display->platform.haswell || display->platform.broadwell) {
		hsw_enable_pc8(display);
	}

	/* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */
	if (INTEL_PCH_TYPE(display) >= PCH_CNP && INTEL_PCH_TYPE(display) < PCH_DG1)
		intel_de_rmw(display, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS);
}

void intel_display_power_resume_early(struct intel_display *display)
{
	if (DISPLAY_VER(display) >= 11 || display->platform.geminilake ||
	    display->platform.broxton) {
		gen9_sanitize_dc_state(display);
		bxt_disable_dc9(display);
	} else if (display->platform.haswell || display->platform.broadwell) {
		hsw_disable_pc8(display);
	}

	/* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */
	if (INTEL_PCH_TYPE(display) >= PCH_CNP && INTEL_PCH_TYPE(display) < PCH_DG1)
		intel_de_rmw(display, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0);

	intel_power_domains_resume(display);
}

void intel_display_power_suspend(struct intel_display *display)
{
	if (DISPLAY_VER(display) >= 11) {
		icl_display_core_uninit(display);
		bxt_enable_dc9(display);
	} else if (display->platform.geminilake || display->platform.broxton) {
		bxt_display_core_uninit(display);
		bxt_enable_dc9(display);
	} else if (display->platform.haswell || display->platform.broadwell) {
		hsw_enable_pc8(display);
	}
}

void intel_display_power_resume(struct intel_display *display)
2301 { 2302 struct i915_power_domains *power_domains = &display->power.domains; 2303 2304 if (DISPLAY_VER(display) >= 11) { 2305 bxt_disable_dc9(display); 2306 icl_display_core_init(display, true); 2307 if (intel_dmc_has_payload(display)) { 2308 if (power_domains->allowed_dc_mask & DC_STATE_EN_UPTO_DC6) 2309 skl_enable_dc6(display); 2310 else if (power_domains->allowed_dc_mask & DC_STATE_EN_UPTO_DC5) 2311 gen9_enable_dc5(display); 2312 } 2313 } else if (display->platform.geminilake || display->platform.broxton) { 2314 bxt_disable_dc9(display); 2315 bxt_display_core_init(display, true); 2316 if (intel_dmc_has_payload(display) && 2317 (power_domains->allowed_dc_mask & DC_STATE_EN_UPTO_DC5)) 2318 gen9_enable_dc5(display); 2319 } else if (display->platform.haswell || display->platform.broadwell) { 2320 hsw_disable_pc8(display); 2321 } 2322 } 2323 2324 void intel_display_power_debug(struct intel_display *display, struct seq_file *m) 2325 { 2326 struct i915_power_domains *power_domains = &display->power.domains; 2327 int i; 2328 2329 mutex_lock(&power_domains->lock); 2330 2331 seq_printf(m, "Runtime power status: %s\n", 2332 str_enabled_disabled(!power_domains->init_wakeref)); 2333 2334 seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count"); 2335 for (i = 0; i < power_domains->power_well_count; i++) { 2336 struct i915_power_well *power_well; 2337 enum intel_display_power_domain power_domain; 2338 2339 power_well = &power_domains->power_wells[i]; 2340 seq_printf(m, "%-25s %d\n", intel_power_well_name(power_well), 2341 intel_power_well_refcount(power_well)); 2342 2343 for_each_power_domain(power_domain, intel_power_well_domains(power_well)) 2344 seq_printf(m, " %-23s %d\n", 2345 intel_display_power_domain_str(power_domain), 2346 power_domains->domain_use_count[power_domain]); 2347 } 2348 2349 mutex_unlock(&power_domains->lock); 2350 } 2351 2352 struct intel_ddi_port_domains { 2353 enum port port_start; 2354 enum port port_end; 2355 enum aux_ch aux_ch_start; 2356 enum aux_ch aux_ch_end; 2357 2358 enum intel_display_power_domain ddi_lanes; 2359 enum intel_display_power_domain ddi_io; 2360 enum intel_display_power_domain aux_io; 2361 enum intel_display_power_domain aux_legacy_usbc; 2362 enum intel_display_power_domain aux_tbt; 2363 }; 2364 2365 static const struct intel_ddi_port_domains 2366 i9xx_port_domains[] = { 2367 { 2368 .port_start = PORT_A, 2369 .port_end = PORT_F, 2370 .aux_ch_start = AUX_CH_A, 2371 .aux_ch_end = AUX_CH_F, 2372 2373 .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A, 2374 .ddi_io = POWER_DOMAIN_PORT_DDI_IO_A, 2375 .aux_io = POWER_DOMAIN_AUX_IO_A, 2376 .aux_legacy_usbc = POWER_DOMAIN_AUX_A, 2377 .aux_tbt = POWER_DOMAIN_INVALID, 2378 }, 2379 }; 2380 2381 static const struct intel_ddi_port_domains 2382 d11_port_domains[] = { 2383 { 2384 .port_start = PORT_A, 2385 .port_end = PORT_B, 2386 .aux_ch_start = AUX_CH_A, 2387 .aux_ch_end = AUX_CH_B, 2388 2389 .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A, 2390 .ddi_io = POWER_DOMAIN_PORT_DDI_IO_A, 2391 .aux_io = POWER_DOMAIN_AUX_IO_A, 2392 .aux_legacy_usbc = POWER_DOMAIN_AUX_A, 2393 .aux_tbt = POWER_DOMAIN_INVALID, 2394 }, { 2395 .port_start = PORT_C, 2396 .port_end = PORT_F, 2397 .aux_ch_start = AUX_CH_C, 2398 .aux_ch_end = AUX_CH_F, 2399 2400 .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_C, 2401 .ddi_io = POWER_DOMAIN_PORT_DDI_IO_C, 2402 .aux_io = POWER_DOMAIN_AUX_IO_C, 2403 .aux_legacy_usbc = POWER_DOMAIN_AUX_C, 2404 .aux_tbt = POWER_DOMAIN_AUX_TBT1, 2405 }, 2406 }; 2407 2408 static const struct intel_ddi_port_domains 2409 d12_port_domains[] 
	{
		.port_start = PORT_A,
		.port_end = PORT_C,
		.aux_ch_start = AUX_CH_A,
		.aux_ch_end = AUX_CH_C,

		.ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A,
		.ddi_io = POWER_DOMAIN_PORT_DDI_IO_A,
		.aux_io = POWER_DOMAIN_AUX_IO_A,
		.aux_legacy_usbc = POWER_DOMAIN_AUX_A,
		.aux_tbt = POWER_DOMAIN_INVALID,
	}, {
		.port_start = PORT_TC1,
		.port_end = PORT_TC6,
		.aux_ch_start = AUX_CH_USBC1,
		.aux_ch_end = AUX_CH_USBC6,

		.ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_TC1,
		.ddi_io = POWER_DOMAIN_PORT_DDI_IO_TC1,
		.aux_io = POWER_DOMAIN_INVALID,
		.aux_legacy_usbc = POWER_DOMAIN_AUX_USBC1,
		.aux_tbt = POWER_DOMAIN_AUX_TBT1,
	},
};

static const struct intel_ddi_port_domains
d13_port_domains[] = {
	{
		.port_start = PORT_A,
		.port_end = PORT_C,
		.aux_ch_start = AUX_CH_A,
		.aux_ch_end = AUX_CH_C,

		.ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A,
		.ddi_io = POWER_DOMAIN_PORT_DDI_IO_A,
		.aux_io = POWER_DOMAIN_AUX_IO_A,
		.aux_legacy_usbc = POWER_DOMAIN_AUX_A,
		.aux_tbt = POWER_DOMAIN_INVALID,
	}, {
		.port_start = PORT_TC1,
		.port_end = PORT_TC4,
		.aux_ch_start = AUX_CH_USBC1,
		.aux_ch_end = AUX_CH_USBC4,

		.ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_TC1,
		.ddi_io = POWER_DOMAIN_PORT_DDI_IO_TC1,
		.aux_io = POWER_DOMAIN_INVALID,
		.aux_legacy_usbc = POWER_DOMAIN_AUX_USBC1,
		.aux_tbt = POWER_DOMAIN_AUX_TBT1,
	}, {
		.port_start = PORT_D_XELPD,
		.port_end = PORT_E_XELPD,
		.aux_ch_start = AUX_CH_D_XELPD,
		.aux_ch_end = AUX_CH_E_XELPD,

		.ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_D,
		.ddi_io = POWER_DOMAIN_PORT_DDI_IO_D,
		.aux_io = POWER_DOMAIN_AUX_IO_D,
		.aux_legacy_usbc = POWER_DOMAIN_AUX_D,
		.aux_tbt = POWER_DOMAIN_INVALID,
	},
};

static void
intel_port_domains_for_platform(struct intel_display *display,
				const struct intel_ddi_port_domains **domains,
				int *domains_size)
{
	if (DISPLAY_VER(display) >= 13) {
		*domains = d13_port_domains;
		*domains_size = ARRAY_SIZE(d13_port_domains);
	} else if (DISPLAY_VER(display) >= 12) {
		*domains = d12_port_domains;
		*domains_size = ARRAY_SIZE(d12_port_domains);
	} else if (DISPLAY_VER(display) >= 11) {
		*domains = d11_port_domains;
		*domains_size = ARRAY_SIZE(d11_port_domains);
	} else {
		*domains = i9xx_port_domains;
		*domains_size = ARRAY_SIZE(i9xx_port_domains);
	}
}

static const struct intel_ddi_port_domains *
intel_port_domains_for_port(struct intel_display *display, enum port port)
{
	const struct intel_ddi_port_domains *domains;
	int domains_size;
	int i;

	intel_port_domains_for_platform(display, &domains, &domains_size);
	for (i = 0; i < domains_size; i++)
		if (port >= domains[i].port_start && port <= domains[i].port_end)
			return &domains[i];

	return NULL;
}
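
/*
 * Each intel_ddi_port_domains entry above describes a contiguous run of
 * ports/AUX channels together with the power domain of the first member of
 * that run; the helpers below add the offset within the run to that base
 * domain. Worked example (illustrative, and relying on the power domain enum
 * values being consecutive within a run): on a display version 12 platform,
 * intel_display_power_ddi_io_domain(display, PORT_TC3) selects the
 * PORT_TC1..PORT_TC6 entry and returns
 * POWER_DOMAIN_PORT_DDI_IO_TC1 + (PORT_TC3 - PORT_TC1), i.e.
 * POWER_DOMAIN_PORT_DDI_IO_TC3.
 */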

enum intel_display_power_domain
intel_display_power_ddi_io_domain(struct intel_display *display, enum port port)
{
	const struct intel_ddi_port_domains *domains = intel_port_domains_for_port(display, port);

	if (drm_WARN_ON(display->drm, !domains || domains->ddi_io == POWER_DOMAIN_INVALID))
		return POWER_DOMAIN_PORT_DDI_IO_A;

	return domains->ddi_io + (int)(port - domains->port_start);
}

enum intel_display_power_domain
intel_display_power_ddi_lanes_domain(struct intel_display *display, enum port port)
{
	const struct intel_ddi_port_domains *domains = intel_port_domains_for_port(display, port);

	if (drm_WARN_ON(display->drm, !domains || domains->ddi_lanes == POWER_DOMAIN_INVALID))
		return POWER_DOMAIN_PORT_DDI_LANES_A;

	return domains->ddi_lanes + (int)(port - domains->port_start);
}

static const struct intel_ddi_port_domains *
intel_port_domains_for_aux_ch(struct intel_display *display, enum aux_ch aux_ch)
{
	const struct intel_ddi_port_domains *domains;
	int domains_size;
	int i;

	intel_port_domains_for_platform(display, &domains, &domains_size);
	for (i = 0; i < domains_size; i++)
		if (aux_ch >= domains[i].aux_ch_start && aux_ch <= domains[i].aux_ch_end)
			return &domains[i];

	return NULL;
}

enum intel_display_power_domain
intel_display_power_aux_io_domain(struct intel_display *display, enum aux_ch aux_ch)
{
	const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(display, aux_ch);

	if (drm_WARN_ON(display->drm, !domains || domains->aux_io == POWER_DOMAIN_INVALID))
		return POWER_DOMAIN_AUX_IO_A;

	return domains->aux_io + (int)(aux_ch - domains->aux_ch_start);
}

enum intel_display_power_domain
intel_display_power_legacy_aux_domain(struct intel_display *display, enum aux_ch aux_ch)
{
	const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(display, aux_ch);

	if (drm_WARN_ON(display->drm, !domains || domains->aux_legacy_usbc == POWER_DOMAIN_INVALID))
		return POWER_DOMAIN_AUX_A;

	return domains->aux_legacy_usbc + (int)(aux_ch - domains->aux_ch_start);
}

enum intel_display_power_domain
intel_display_power_tbt_aux_domain(struct intel_display *display, enum aux_ch aux_ch)
{
	const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(display, aux_ch);

	if (drm_WARN_ON(display->drm, !domains || domains->aux_tbt == POWER_DOMAIN_INVALID))
		return POWER_DOMAIN_AUX_TBT1;

	return domains->aux_tbt + (int)(aux_ch - domains->aux_ch_start);
}
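
/*
 * Illustrative usage sketch (not an actual call site in this file): a caller
 * that needs the legacy AUX power domain for a given channel would typically
 * wrap its register access in a power reference, e.g.
 *
 *	enum intel_display_power_domain domain =
 *		intel_display_power_legacy_aux_domain(display, aux_ch);
 *	intel_wakeref_t wakeref = intel_display_power_get(display, domain);
 *
 *	... perform the AUX transfer ...
 *
 *	intel_display_power_put(display, domain, wakeref);
 */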