/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/pm_runtime.h>
#include <asm/iosf_mbi.h>

#include "i915_drv.h"
#include "i915_vgpu.h"
#include "intel_drv.h"
#include "intel_pm.h"

#define FORCEWAKE_ACK_TIMEOUT_MS 50
#define GT_FIFO_TIMEOUT_MS 10

#define __raw_posting_read(...) ((void)__raw_uncore_read32(__VA_ARGS__))

static const char * const forcewake_domain_names[] = {
	"render",
	"blitter",
	"media",
	"vdbox0",
	"vdbox1",
	"vdbox2",
	"vdbox3",
	"vebox0",
	"vebox1",
};

const char *
intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
{
	BUILD_BUG_ON(ARRAY_SIZE(forcewake_domain_names) != FW_DOMAIN_ID_COUNT);

	if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
		return forcewake_domain_names[id];

	WARN_ON(id);

	return "unknown";
}

#define fw_ack(d) readl((d)->reg_ack)
#define fw_set(d, val) writel(_MASKED_BIT_ENABLE((val)), (d)->reg_set)
#define fw_clear(d, val) writel(_MASKED_BIT_DISABLE((val)), (d)->reg_set)

static inline void
fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
{
	/*
	 * We don't really know if the powerwell for the forcewake domain we are
	 * trying to reset here does exist at this point (engines could be fused
	 * off in ICL+), so no waiting for acks
	 */
	/* WaRsClearFWBitsAtReset:bdw,skl */
	fw_clear(d, 0xffff);
}

static inline void
fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
{
	d->wake_count++;
	hrtimer_start_range_ns(&d->timer,
			       NSEC_PER_MSEC,
			       NSEC_PER_MSEC,
			       HRTIMER_MODE_REL);
}

static inline int
__wait_for_ack(const struct intel_uncore_forcewake_domain *d,
	       const u32 ack,
	       const u32 value)
{
	return wait_for_atomic((fw_ack(d) & ack) == value,
			       FORCEWAKE_ACK_TIMEOUT_MS);
}

static inline int
wait_ack_clear(const struct intel_uncore_forcewake_domain *d,
	       const u32 ack)
{
	return __wait_for_ack(d, ack, 0);
}

static inline int
wait_ack_set(const struct intel_uncore_forcewake_domain *d,
	     const u32 ack)
{
	return __wait_for_ack(d, ack, ack);
}
static inline void
fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
{
	if (wait_ack_clear(d, FORCEWAKE_KERNEL)) {
		DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
		add_taint_for_CI(TAINT_WARN); /* CI now unreliable */
	}
}

enum ack_type {
	ACK_CLEAR = 0,
	ACK_SET
};

static int
fw_domain_wait_ack_with_fallback(const struct intel_uncore_forcewake_domain *d,
				 const enum ack_type type)
{
	const u32 ack_bit = FORCEWAKE_KERNEL;
	const u32 value = type == ACK_SET ? ack_bit : 0;
	unsigned int pass;
	bool ack_detected;

	/*
	 * There is a possibility of driver's wake request colliding
	 * with hardware's own wake requests and that can cause
	 * hardware to not deliver the driver's ack message.
	 *
	 * Use a fallback bit toggle to kick the gpu state machine
	 * in the hope that the original ack will be delivered along with
	 * the fallback ack.
	 *
	 * This workaround is described in HSDES #1604254524 and it's known as:
	 * WaRsForcewakeAddDelayForAck:skl,bxt,kbl,glk,cfl,cnl,icl
	 * although the name is a bit misleading.
	 */

	pass = 1;
	do {
		wait_ack_clear(d, FORCEWAKE_KERNEL_FALLBACK);

		fw_set(d, FORCEWAKE_KERNEL_FALLBACK);
		/* Give gt some time to relax before the polling frenzy */
		udelay(10 * pass);
		wait_ack_set(d, FORCEWAKE_KERNEL_FALLBACK);

		ack_detected = (fw_ack(d) & ack_bit) == value;

		fw_clear(d, FORCEWAKE_KERNEL_FALLBACK);
	} while (!ack_detected && pass++ < 10);

	DRM_DEBUG_DRIVER("%s had to use fallback to %s ack, 0x%x (passes %u)\n",
			 intel_uncore_forcewake_domain_to_str(d->id),
			 type == ACK_SET ? "set" : "clear",
			 fw_ack(d),
			 pass);

	return ack_detected ? 0 : -ETIMEDOUT;
}

static inline void
fw_domain_wait_ack_clear_fallback(const struct intel_uncore_forcewake_domain *d)
{
	if (likely(!wait_ack_clear(d, FORCEWAKE_KERNEL)))
		return;

	if (fw_domain_wait_ack_with_fallback(d, ACK_CLEAR))
		fw_domain_wait_ack_clear(d);
}

static inline void
fw_domain_get(const struct intel_uncore_forcewake_domain *d)
{
	fw_set(d, FORCEWAKE_KERNEL);
}

static inline void
fw_domain_wait_ack_set(const struct intel_uncore_forcewake_domain *d)
{
	if (wait_ack_set(d, FORCEWAKE_KERNEL)) {
		DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
		add_taint_for_CI(TAINT_WARN); /* CI now unreliable */
	}
}

static inline void
fw_domain_wait_ack_set_fallback(const struct intel_uncore_forcewake_domain *d)
{
	if (likely(!wait_ack_set(d, FORCEWAKE_KERNEL)))
		return;

	if (fw_domain_wait_ack_with_fallback(d, ACK_SET))
		fw_domain_wait_ack_set(d);
}

static inline void
fw_domain_put(const struct intel_uncore_forcewake_domain *d)
{
	fw_clear(d, FORCEWAKE_KERNEL);
}

static void
fw_domains_get(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp) {
		fw_domain_wait_ack_clear(d);
		fw_domain_get(d);
	}

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
		fw_domain_wait_ack_set(d);

	uncore->fw_domains_active |= fw_domains;
}
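/*
 * Note that both "get" flavours (plain above, fallback below) deliberately
 * use two passes: every requested domain is woken first and only then are
 * the set-acks awaited, so the per-domain ack latencies overlap instead of
 * adding up.
 */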
static void
fw_domains_get_with_fallback(struct intel_uncore *uncore,
			     enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp) {
		fw_domain_wait_ack_clear_fallback(d);
		fw_domain_get(d);
	}

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
		fw_domain_wait_ack_set_fallback(d);

	uncore->fw_domains_active |= fw_domains;
}

static void
fw_domains_put(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
		fw_domain_put(d);

	uncore->fw_domains_active &= ~fw_domains;
}

static void
fw_domains_reset(struct intel_uncore *uncore,
		 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	if (!fw_domains)
		return;

	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
		fw_domain_reset(d);
}

static inline u32 gt_thread_status(struct intel_uncore *uncore)
{
	u32 val;

	val = __raw_uncore_read32(uncore, GEN6_GT_THREAD_STATUS_REG);
	val &= GEN6_GT_THREAD_STATUS_CORE_MASK;

	return val;
}

static void __gen6_gt_wait_for_thread_c0(struct intel_uncore *uncore)
{
	/*
	 * w/a for a sporadic read returning 0 by waiting for the GT
	 * thread to wake up.
	 */
	WARN_ONCE(wait_for_atomic_us(gt_thread_status(uncore) == 0, 5000),
		  "GT thread status wait timed out\n");
}

static void fw_domains_get_with_thread_status(struct intel_uncore *uncore,
					      enum forcewake_domains fw_domains)
{
	fw_domains_get(uncore, fw_domains);

	/* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
	__gen6_gt_wait_for_thread_c0(uncore);
}

static inline u32 fifo_free_entries(struct intel_uncore *uncore)
{
	u32 count = __raw_uncore_read32(uncore, GTFIFOCTL);

	return count & GT_FIFO_FREE_ENTRIES_MASK;
}

static void __gen6_gt_wait_for_fifo(struct intel_uncore *uncore)
{
	u32 n;

	/* On VLV, FIFO will be shared by both SW and HW.
	 * So, we need to read the FREE_ENTRIES every time */
	if (IS_VALLEYVIEW(uncore_to_i915(uncore)))
		n = fifo_free_entries(uncore);
	else
		n = uncore->fifo_count;

	if (n <= GT_FIFO_NUM_RESERVED_ENTRIES) {
		if (wait_for_atomic((n = fifo_free_entries(uncore)) >
				    GT_FIFO_NUM_RESERVED_ENTRIES,
				    GT_FIFO_TIMEOUT_MS)) {
			DRM_DEBUG("GT_FIFO timeout, entries: %u\n", n);
			return;
		}
	}

	uncore->fifo_count = n - 1;
}

static enum hrtimer_restart
intel_uncore_fw_release_timer(struct hrtimer *timer)
{
	struct intel_uncore_forcewake_domain *domain =
	       container_of(timer, struct intel_uncore_forcewake_domain, timer);
	struct intel_uncore *uncore = forcewake_domain_to_uncore(domain);
	unsigned long irqflags;

	assert_rpm_device_not_suspended(uncore->rpm);

	if (xchg(&domain->active, false))
		return HRTIMER_RESTART;

	spin_lock_irqsave(&uncore->lock, irqflags);
	if (WARN_ON(domain->wake_count == 0))
		domain->wake_count++;

	if (--domain->wake_count == 0)
		uncore->funcs.force_wake_put(uncore, domain->mask);

	spin_unlock_irqrestore(&uncore->lock, irqflags);

	return HRTIMER_NORESTART;
}
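/*
 * The auto-release path: references dropped through
 * __intel_uncore_forcewake_put() are handed to the 1ms hrtimer armed by
 * fw_domain_arm_timer(), and domain->active is the lockless "used again
 * recently" hint that lets the callback above keep the domain awake instead
 * of releasing it.
 */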
/* Note callers must have acquired the PUNIT->PMIC bus, before calling this. */
static unsigned int
intel_uncore_forcewake_reset(struct intel_uncore *uncore)
{
	unsigned long irqflags;
	struct intel_uncore_forcewake_domain *domain;
	int retry_count = 100;
	enum forcewake_domains fw, active_domains;

	iosf_mbi_assert_punit_acquired();

	/* Hold uncore.lock across reset to prevent any register access
	 * with forcewake not set correctly. Wait until all pending
	 * timers are run before holding.
	 */
	while (1) {
		unsigned int tmp;

		active_domains = 0;

		for_each_fw_domain(domain, uncore, tmp) {
			smp_store_mb(domain->active, false);
			if (hrtimer_cancel(&domain->timer) == 0)
				continue;

			intel_uncore_fw_release_timer(&domain->timer);
		}

		spin_lock_irqsave(&uncore->lock, irqflags);

		for_each_fw_domain(domain, uncore, tmp) {
			if (hrtimer_active(&domain->timer))
				active_domains |= domain->mask;
		}

		if (active_domains == 0)
			break;

		if (--retry_count == 0) {
			DRM_ERROR("Timed out waiting for forcewake timers to finish\n");
			break;
		}

		spin_unlock_irqrestore(&uncore->lock, irqflags);
		cond_resched();
	}

	WARN_ON(active_domains);

	fw = uncore->fw_domains_active;
	if (fw)
		uncore->funcs.force_wake_put(uncore, fw);

	fw_domains_reset(uncore, uncore->fw_domains);
	assert_forcewakes_inactive(uncore);

	spin_unlock_irqrestore(&uncore->lock, irqflags);

	return fw; /* track the lost user forcewake domains */
}

static bool
fpga_check_for_unclaimed_mmio(struct intel_uncore *uncore)
{
	u32 dbg;

	dbg = __raw_uncore_read32(uncore, FPGA_DBG);
	if (likely(!(dbg & FPGA_DBG_RM_NOCLAIM)))
		return false;

	__raw_uncore_write32(uncore, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);

	return true;
}

static bool
vlv_check_for_unclaimed_mmio(struct intel_uncore *uncore)
{
	u32 cer;

	cer = __raw_uncore_read32(uncore, CLAIM_ER);
	if (likely(!(cer & (CLAIM_ER_OVERFLOW | CLAIM_ER_CTR_MASK))))
		return false;

	__raw_uncore_write32(uncore, CLAIM_ER, CLAIM_ER_CLR);

	return true;
}

static bool
gen6_check_for_fifo_debug(struct intel_uncore *uncore)
{
	u32 fifodbg;

	fifodbg = __raw_uncore_read32(uncore, GTFIFODBG);

	if (unlikely(fifodbg)) {
		DRM_DEBUG_DRIVER("GTFIFODBG = 0x%08x\n", fifodbg);
		__raw_uncore_write32(uncore, GTFIFODBG, fifodbg);
	}

	return fifodbg;
}

static bool
check_for_unclaimed_mmio(struct intel_uncore *uncore)
{
	bool ret = false;

	if (intel_uncore_has_fpga_dbg_unclaimed(uncore))
		ret |= fpga_check_for_unclaimed_mmio(uncore);

	if (intel_uncore_has_dbg_unclaimed(uncore))
		ret |= vlv_check_for_unclaimed_mmio(uncore);

	if (intel_uncore_has_fifo(uncore))
		ret |= gen6_check_for_fifo_debug(uncore);

	return ret;
}

static void __intel_uncore_early_sanitize(struct intel_uncore *uncore,
					  unsigned int restore_forcewake)
{
	/* clear out unclaimed reg detection bit */
	if (check_for_unclaimed_mmio(uncore))
		DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");

	/* WaDisableShadowRegForCpd:chv */
	if (IS_CHERRYVIEW(uncore_to_i915(uncore))) {
		__raw_uncore_write32(uncore, GTFIFOCTL,
				     __raw_uncore_read32(uncore, GTFIFOCTL) |
				     GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
				     GT_FIFO_CTL_RC6_POLICY_STALL);
	}

	iosf_mbi_punit_acquire();
	intel_uncore_forcewake_reset(uncore);
	if (restore_forcewake) {
		spin_lock_irq(&uncore->lock);
		uncore->funcs.force_wake_get(uncore, restore_forcewake);

		if (intel_uncore_has_fifo(uncore))
			uncore->fifo_count = fifo_free_entries(uncore);
		spin_unlock_irq(&uncore->lock);
	}
	iosf_mbi_punit_release();
}

void intel_uncore_suspend(struct intel_uncore *uncore)
{
	iosf_mbi_punit_acquire();
	iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
		&uncore->pmic_bus_access_nb);
	uncore->fw_domains_saved = intel_uncore_forcewake_reset(uncore);
	iosf_mbi_punit_release();
}

void intel_uncore_resume_early(struct intel_uncore *uncore)
{
	unsigned int restore_forcewake;

	restore_forcewake = fetch_and_zero(&uncore->fw_domains_saved);
	__intel_uncore_early_sanitize(uncore, restore_forcewake);

	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
}

void intel_uncore_runtime_resume(struct intel_uncore *uncore)
{
	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
}

void intel_uncore_sanitize(struct drm_i915_private *dev_priv)
{
	/* BIOS often leaves RC6 enabled, but disable it for hw init */
	intel_sanitize_gt_powersave(dev_priv);
}

static void __intel_uncore_forcewake_get(struct intel_uncore *uncore,
					 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	fw_domains &= uncore->fw_domains;

	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
		if (domain->wake_count++) {
			fw_domains &= ~domain->mask;
			domain->active = true;
		}
	}

	if (fw_domains)
		uncore->funcs.force_wake_get(uncore, fw_domains);
}

/**
 * intel_uncore_forcewake_get - grab forcewake domain references
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to get reference on
 *
 * This function can be used to get GT's forcewake domain references.
 * Normal register access will handle the forcewake domains automatically.
 * However, if some sequence requires the GT to not power down particular
 * forcewake domains, this function should be called at the beginning of
 * the sequence, and subsequently the reference should be dropped by a
 * symmetric call to intel_uncore_forcewake_put(). Usually the caller wants
 * all the domains to be kept awake, so @fw_domains would then be
 * FORCEWAKE_ALL.
 */
void intel_uncore_forcewake_get(struct intel_uncore *uncore,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!uncore->funcs.force_wake_get)
		return;

	__assert_rpm_wakelock_held(uncore->rpm);

	spin_lock_irqsave(&uncore->lock, irqflags);
	__intel_uncore_forcewake_get(uncore, fw_domains);
	spin_unlock_irqrestore(&uncore->lock, irqflags);
}
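/*
 * Illustrative use (not taken from any particular caller): a sequence that
 * must keep the render well powered across several raw accesses would do
 *
 *	intel_uncore_forcewake_get(uncore, FORCEWAKE_RENDER);
 *	... intel_uncore_read_fw()/intel_uncore_write_fw() accesses ...
 *	intel_uncore_forcewake_put(uncore, FORCEWAKE_RENDER);
 *
 * instead of re-arming the auto-release timer on every individual access.
 */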
/**
 * intel_uncore_forcewake_user_get - claim forcewake on behalf of userspace
 * @uncore: the intel_uncore structure
 *
 * This function is a wrapper around intel_uncore_forcewake_get() to acquire
 * the GT powerwell and in the process disable our debugging for the
 * duration of userspace's bypass.
 */
void intel_uncore_forcewake_user_get(struct intel_uncore *uncore)
{
	spin_lock_irq(&uncore->lock);
	if (!uncore->user_forcewake.count++) {
		intel_uncore_forcewake_get__locked(uncore, FORCEWAKE_ALL);

		/* Save and disable mmio debugging for the user bypass */
		uncore->user_forcewake.saved_mmio_check =
			uncore->unclaimed_mmio_check;
		uncore->user_forcewake.saved_mmio_debug =
			i915_modparams.mmio_debug;

		uncore->unclaimed_mmio_check = 0;
		i915_modparams.mmio_debug = 0;
	}
	spin_unlock_irq(&uncore->lock);
}

/**
 * intel_uncore_forcewake_user_put - release forcewake on behalf of userspace
 * @uncore: the intel_uncore structure
 *
 * This function complements intel_uncore_forcewake_user_get() and releases
 * the GT powerwell taken on behalf of the userspace bypass.
 */
void intel_uncore_forcewake_user_put(struct intel_uncore *uncore)
{
	spin_lock_irq(&uncore->lock);
	if (!--uncore->user_forcewake.count) {
		if (intel_uncore_unclaimed_mmio(uncore))
			dev_info(uncore_to_i915(uncore)->drm.dev,
				 "Invalid mmio detected during user access\n");

		uncore->unclaimed_mmio_check =
			uncore->user_forcewake.saved_mmio_check;
		i915_modparams.mmio_debug =
			uncore->user_forcewake.saved_mmio_debug;

		intel_uncore_forcewake_put__locked(uncore, FORCEWAKE_ALL);
	}
	spin_unlock_irq(&uncore->lock);
}

/**
 * intel_uncore_forcewake_get__locked - grab forcewake domain references
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to get reference on
 *
 * See intel_uncore_forcewake_get(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_get__locked(struct intel_uncore *uncore,
					enum forcewake_domains fw_domains)
{
	lockdep_assert_held(&uncore->lock);

	if (!uncore->funcs.force_wake_get)
		return;

	__intel_uncore_forcewake_get(uncore, fw_domains);
}

static void __intel_uncore_forcewake_put(struct intel_uncore *uncore,
					 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	fw_domains &= uncore->fw_domains;

	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
		if (WARN_ON(domain->wake_count == 0))
			continue;

		if (--domain->wake_count) {
			domain->active = true;
			continue;
		}

		fw_domain_arm_timer(domain);
	}
}

/**
 * intel_uncore_forcewake_put - release a forcewake domain reference
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to put references
 *
 * This function drops the device-level forcewakes for specified
 * domains obtained by intel_uncore_forcewake_get().
 */
void intel_uncore_forcewake_put(struct intel_uncore *uncore,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!uncore->funcs.force_wake_put)
		return;

	spin_lock_irqsave(&uncore->lock, irqflags);
	__intel_uncore_forcewake_put(uncore, fw_domains);
	spin_unlock_irqrestore(&uncore->lock, irqflags);
}
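/*
 * Sketch of the __locked pairing, mirroring what __intel_wait_for_register()
 * below does (illustrative only):
 *
 *	spin_lock_irq(&uncore->lock);
 *	intel_uncore_forcewake_get__locked(uncore, fw);
 *	... raw register access ...
 *	intel_uncore_forcewake_put__locked(uncore, fw);
 *	spin_unlock_irq(&uncore->lock);
 */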
/**
 * intel_uncore_forcewake_put__locked - release forcewake domain references
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to put references on
 *
 * See intel_uncore_forcewake_put(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_put__locked(struct intel_uncore *uncore,
					enum forcewake_domains fw_domains)
{
	lockdep_assert_held(&uncore->lock);

	if (!uncore->funcs.force_wake_put)
		return;

	__intel_uncore_forcewake_put(uncore, fw_domains);
}

void assert_forcewakes_inactive(struct intel_uncore *uncore)
{
	if (!uncore->funcs.force_wake_get)
		return;

	WARN(uncore->fw_domains_active,
	     "Expected all fw_domains to be inactive, but %08x are still on\n",
	     uncore->fw_domains_active);
}

void assert_forcewakes_active(struct intel_uncore *uncore,
			      enum forcewake_domains fw_domains)
{
	if (!uncore->funcs.force_wake_get)
		return;

	__assert_rpm_wakelock_held(uncore->rpm);

	fw_domains &= uncore->fw_domains;
	WARN(fw_domains & ~uncore->fw_domains_active,
	     "Expected %08x fw_domains to be active, but %08x are off\n",
	     fw_domains, fw_domains & ~uncore->fw_domains_active);
}

/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(reg) ((reg) < 0x40000)

#define GEN11_NEEDS_FORCE_WAKE(reg) \
	((reg) < 0x40000 || ((reg) >= 0x1c0000 && (reg) < 0x1dc000))

#define __gen6_reg_read_fw_domains(uncore, offset) \
({ \
	enum forcewake_domains __fwd; \
	if (NEEDS_FORCE_WAKE(offset)) \
		__fwd = FORCEWAKE_RENDER; \
	else \
		__fwd = 0; \
	__fwd; \
})

static int fw_range_cmp(u32 offset, const struct intel_forcewake_range *entry)
{
	if (offset < entry->start)
		return -1;
	else if (offset > entry->end)
		return 1;
	else
		return 0;
}

/* Copied and "macroized" from lib/bsearch.c */
#define BSEARCH(key, base, num, cmp) ({                                 \
	unsigned int start__ = 0, end__ = (num);                        \
	typeof(base) result__ = NULL;                                   \
	while (start__ < end__) {                                       \
		unsigned int mid__ = start__ + (end__ - start__) / 2;   \
		int ret__ = (cmp)((key), (base) + mid__);               \
		if (ret__ < 0) {                                        \
			end__ = mid__;                                  \
		} else if (ret__ > 0) {                                 \
			start__ = mid__ + 1;                            \
		} else {                                                \
			result__ = (base) + mid__;                      \
			break;                                          \
		}                                                       \
	}                                                               \
	result__;                                                       \
})

static enum forcewake_domains
find_fw_domain(struct intel_uncore *uncore, u32 offset)
{
	const struct intel_forcewake_range *entry;

	entry = BSEARCH(offset,
			uncore->fw_domains_table,
			uncore->fw_domains_table_entries,
			fw_range_cmp);

	if (!entry)
		return 0;

	/*
	 * The list of FW domains depends on the SKU in gen11+ so we
	 * can't determine it statically. We use FORCEWAKE_ALL and
	 * translate it here to the list of available domains.
	 */
	if (entry->domains == FORCEWAKE_ALL)
		return uncore->fw_domains;

	WARN(entry->domains & ~uncore->fw_domains,
	     "Uninitialized forcewake domain(s) 0x%x accessed at 0x%x\n",
	     entry->domains & ~uncore->fw_domains, offset);

	return entry->domains;
}

#define GEN_FW_RANGE(s, e, d) \
	{ .start = (s), .end = (e), .domains = (d) }

#define HAS_FWTABLE(dev_priv) \
	(INTEL_GEN(dev_priv) >= 9 || \
	 IS_CHERRYVIEW(dev_priv) || \
	 IS_VALLEYVIEW(dev_priv))
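/*
 * Quick orientation for the tables that follow: each entry maps a range of
 * mmio offsets onto the forcewake domains needed to access it, e.g.
 * (taken from the VLV table below)
 *
 *	GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER)
 *
 * means any register in [0x2000, 0x3fff] needs the render well awake.
 * find_fw_domain() bsearches these tables, hence the sorting requirement.
 */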
/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __vlv_fw_ranges[] = {
	GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x5000, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb000, 0x11fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x2e000, 0x2ffff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
};

#define __fwtable_reg_read_fw_domains(uncore, offset) \
({ \
	enum forcewake_domains __fwd = 0; \
	if (NEEDS_FORCE_WAKE((offset))) \
		__fwd = find_fw_domain(uncore, offset); \
	__fwd; \
})

#define __gen11_fwtable_reg_read_fw_domains(uncore, offset) \
({ \
	enum forcewake_domains __fwd = 0; \
	if (GEN11_NEEDS_FORCE_WAKE((offset))) \
		__fwd = find_fw_domain(uncore, offset); \
	__fwd; \
})

/* *Must* be sorted by offset! See intel_shadow_table_check(). */
static const i915_reg_t gen8_shadowed_regs[] = {
	RING_TAIL(RENDER_RING_BASE),	/* 0x2000 (base) */
	GEN6_RPNSWREQ,			/* 0xA008 */
	GEN6_RC_VIDEO_FREQ,		/* 0xA00C */
	RING_TAIL(GEN6_BSD_RING_BASE),	/* 0x12000 (base) */
	RING_TAIL(VEBOX_RING_BASE),	/* 0x1a000 (base) */
	RING_TAIL(BLT_RING_BASE),	/* 0x22000 (base) */
	/* TODO: Other registers are not yet used */
};

static const i915_reg_t gen11_shadowed_regs[] = {
	RING_TAIL(RENDER_RING_BASE),		/* 0x2000 (base) */
	GEN6_RPNSWREQ,				/* 0xA008 */
	GEN6_RC_VIDEO_FREQ,			/* 0xA00C */
	RING_TAIL(BLT_RING_BASE),		/* 0x22000 (base) */
	RING_TAIL(GEN11_BSD_RING_BASE),		/* 0x1C0000 (base) */
	RING_TAIL(GEN11_BSD2_RING_BASE),	/* 0x1C4000 (base) */
	RING_TAIL(GEN11_VEBOX_RING_BASE),	/* 0x1C8000 (base) */
	RING_TAIL(GEN11_BSD3_RING_BASE),	/* 0x1D0000 (base) */
	RING_TAIL(GEN11_BSD4_RING_BASE),	/* 0x1D4000 (base) */
	RING_TAIL(GEN11_VEBOX2_RING_BASE),	/* 0x1D8000 (base) */
	/* TODO: Other registers are not yet used */
};

static int mmio_reg_cmp(u32 key, const i915_reg_t *reg)
{
	u32 offset = i915_mmio_reg_offset(*reg);

	if (key < offset)
		return -1;
	else if (key > offset)
		return 1;
	else
		return 0;
}

#define __is_genX_shadowed(x) \
static bool is_gen##x##_shadowed(u32 offset) \
{ \
	const i915_reg_t *regs = gen##x##_shadowed_regs; \
	return BSEARCH(offset, regs, ARRAY_SIZE(gen##x##_shadowed_regs), \
		       mmio_reg_cmp); \
}

__is_genX_shadowed(8)
__is_genX_shadowed(11)

#define __gen8_reg_write_fw_domains(uncore, offset) \
({ \
	enum forcewake_domains __fwd; \
	if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(offset)) \
		__fwd = FORCEWAKE_RENDER; \
	else \
		__fwd = 0; \
	__fwd; \
})
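/*
 * Background note (not spelled out elsewhere in this file): the registers
 * listed in the shadow tables above are latched by the hardware even while
 * the well is powered down, which is why the write paths can skip forcewake
 * for them via is_gen8_shadowed()/is_gen11_shadowed().
 */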
/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __chv_fw_ranges[] = {
	GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x4fff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8000, 0x82ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x85ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8800, 0x88ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x9000, 0xafff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xe000, 0xe7ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xf000, 0xffff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1a000, 0x1bfff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1e800, 0x1e9ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x30000, 0x37fff, FORCEWAKE_MEDIA),
};

#define __fwtable_reg_write_fw_domains(uncore, offset) \
({ \
	enum forcewake_domains __fwd = 0; \
	if (NEEDS_FORCE_WAKE((offset)) && !is_gen8_shadowed(offset)) \
		__fwd = find_fw_domain(uncore, offset); \
	__fwd; \
})

#define __gen11_fwtable_reg_write_fw_domains(uncore, offset) \
({ \
	enum forcewake_domains __fwd = 0; \
	if (GEN11_NEEDS_FORCE_WAKE((offset)) && !is_gen11_shadowed(offset)) \
		__fwd = find_fw_domain(uncore, offset); \
	__fwd; \
})

/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __gen9_fw_ranges[] = {
	GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */
	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8000, 0x812f, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8130, 0x813f, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8800, 0x89ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8a00, 0x8bff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb480, 0xcfff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xd800, 0xdfff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xe900, 0x11fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x14000, 0x19fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x1a000, 0x1e9ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1ea00, 0x243ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x24800, 0x2ffff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
};
/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __gen11_fw_ranges[] = {
	GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */
	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x8bff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_ALL),
	GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb480, 0xdfff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xe900, 0x243ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x24800, 0x3ffff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x40000, 0x1bffff, 0),
	GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0),
	GEN_FW_RANGE(0x1c4000, 0x1c7fff, FORCEWAKE_MEDIA_VDBOX1),
	GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0),
	GEN_FW_RANGE(0x1cc000, 0x1cffff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2),
	GEN_FW_RANGE(0x1d4000, 0x1d7fff, FORCEWAKE_MEDIA_VDBOX3),
	GEN_FW_RANGE(0x1d8000, 0x1dbfff, FORCEWAKE_MEDIA_VEBOX1)
};

static void
ilk_dummy_write(struct intel_uncore *uncore)
{
	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
	 * the chip from rc6 before touching it for real. MI_MODE is masked,
	 * hence harmless to write 0 into. */
	__raw_uncore_write32(uncore, MI_MODE, 0);
}

static void
__unclaimed_reg_debug(struct intel_uncore *uncore,
		      const i915_reg_t reg,
		      const bool read,
		      const bool before)
{
	if (WARN(check_for_unclaimed_mmio(uncore) && !before,
		 "Unclaimed %s register 0x%x\n",
		 read ? "read from" : "write to",
		 i915_mmio_reg_offset(reg)))
		/* Only report the first N failures */
		i915_modparams.mmio_debug--;
}

static inline void
unclaimed_reg_debug(struct intel_uncore *uncore,
		    const i915_reg_t reg,
		    const bool read,
		    const bool before)
{
	if (likely(!i915_modparams.mmio_debug))
		return;

	__unclaimed_reg_debug(uncore, reg, read, before);
}

#define GEN2_READ_HEADER(x) \
	u##x val = 0; \
	__assert_rpm_wakelock_held(uncore->rpm);

#define GEN2_READ_FOOTER \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

#define __gen2_read(x) \
static u##x \
gen2_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	val = __raw_uncore_read##x(uncore, reg); \
	GEN2_READ_FOOTER; \
}

#define __gen5_read(x) \
static u##x \
gen5_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	ilk_dummy_write(uncore); \
	val = __raw_uncore_read##x(uncore, reg); \
	GEN2_READ_FOOTER; \
}

__gen5_read(8)
__gen5_read(16)
__gen5_read(32)
__gen5_read(64)
__gen2_read(8)
__gen2_read(16)
__gen2_read(32)
__gen2_read(64)

#undef __gen5_read
#undef __gen2_read

#undef GEN2_READ_FOOTER
#undef GEN2_READ_HEADER

#define GEN6_READ_HEADER(x) \
	u32 offset = i915_mmio_reg_offset(reg); \
	unsigned long irqflags; \
	u##x val = 0; \
	__assert_rpm_wakelock_held(uncore->rpm); \
	spin_lock_irqsave(&uncore->lock, irqflags); \
	unclaimed_reg_debug(uncore, reg, true, true)

#define GEN6_READ_FOOTER \
	unclaimed_reg_debug(uncore, reg, true, false); \
	spin_unlock_irqrestore(&uncore->lock, irqflags); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

static noinline void ___force_wake_auto(struct intel_uncore *uncore,
					enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp)
		fw_domain_arm_timer(domain);

	uncore->funcs.force_wake_get(uncore, fw_domains);
}
static inline void __force_wake_auto(struct intel_uncore *uncore,
				     enum forcewake_domains fw_domains)
{
	if (WARN_ON(!fw_domains))
		return;

	/* Turn on all requested but inactive supported forcewake domains. */
	fw_domains &= uncore->fw_domains;
	fw_domains &= ~uncore->fw_domains_active;

	if (fw_domains)
		___force_wake_auto(uncore, fw_domains);
}

#define __gen_read(func, x) \
static u##x \
func##_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_READ_HEADER(x); \
	fw_engine = __##func##_reg_read_fw_domains(uncore, offset); \
	if (fw_engine) \
		__force_wake_auto(uncore, fw_engine); \
	val = __raw_uncore_read##x(uncore, reg); \
	GEN6_READ_FOOTER; \
}
#define __gen6_read(x) __gen_read(gen6, x)
#define __fwtable_read(x) __gen_read(fwtable, x)
#define __gen11_fwtable_read(x) __gen_read(gen11_fwtable, x)

__gen11_fwtable_read(8)
__gen11_fwtable_read(16)
__gen11_fwtable_read(32)
__gen11_fwtable_read(64)
__fwtable_read(8)
__fwtable_read(16)
__fwtable_read(32)
__fwtable_read(64)
__gen6_read(8)
__gen6_read(16)
__gen6_read(32)
__gen6_read(64)

#undef __gen11_fwtable_read
#undef __fwtable_read
#undef __gen6_read
#undef GEN6_READ_FOOTER
#undef GEN6_READ_HEADER

#define GEN2_WRITE_HEADER \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	__assert_rpm_wakelock_held(uncore->rpm); \

#define GEN2_WRITE_FOOTER

#define __gen2_write(x) \
static void \
gen2_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	__raw_uncore_write##x(uncore, reg, val); \
	GEN2_WRITE_FOOTER; \
}

#define __gen5_write(x) \
static void \
gen5_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	ilk_dummy_write(uncore); \
	__raw_uncore_write##x(uncore, reg, val); \
	GEN2_WRITE_FOOTER; \
}

__gen5_write(8)
__gen5_write(16)
__gen5_write(32)
__gen2_write(8)
__gen2_write(16)
__gen2_write(32)

#undef __gen5_write
#undef __gen2_write

#undef GEN2_WRITE_FOOTER
#undef GEN2_WRITE_HEADER

#define GEN6_WRITE_HEADER \
	u32 offset = i915_mmio_reg_offset(reg); \
	unsigned long irqflags; \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	__assert_rpm_wakelock_held(uncore->rpm); \
	spin_lock_irqsave(&uncore->lock, irqflags); \
	unclaimed_reg_debug(uncore, reg, false, true)

#define GEN6_WRITE_FOOTER \
	unclaimed_reg_debug(uncore, reg, false, false); \
	spin_unlock_irqrestore(&uncore->lock, irqflags)

#define __gen6_write(x) \
static void \
gen6_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
	GEN6_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE(offset)) \
		__gen6_gt_wait_for_fifo(uncore); \
	__raw_uncore_write##x(uncore, reg, val); \
	GEN6_WRITE_FOOTER; \
}

#define __gen_write(func, x) \
static void \
func##_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_WRITE_HEADER; \
	fw_engine = __##func##_reg_write_fw_domains(uncore, offset); \
	if (fw_engine) \
		__force_wake_auto(uncore, fw_engine); \
	__raw_uncore_write##x(uncore, reg, val); \
	GEN6_WRITE_FOOTER; \
}
#define __gen8_write(x) __gen_write(gen8, x)
#define __fwtable_write(x) __gen_write(fwtable, x)
#define __gen11_fwtable_write(x) __gen_write(gen11_fwtable, x)

__gen11_fwtable_write(8)
__gen11_fwtable_write(16)
__gen11_fwtable_write(32)
__fwtable_write(8)
__fwtable_write(16)
__fwtable_write(32)
__gen8_write(8)
__gen8_write(16)
__gen8_write(32)
__gen6_write(8)
__gen6_write(16)
__gen6_write(32)

#undef __gen11_fwtable_write
#undef __fwtable_write
#undef __gen8_write
#undef __gen6_write
#undef GEN6_WRITE_FOOTER
#undef GEN6_WRITE_HEADER

#define ASSIGN_WRITE_MMIO_VFUNCS(uncore, x) \
do { \
	(uncore)->funcs.mmio_writeb = x##_write8; \
	(uncore)->funcs.mmio_writew = x##_write16; \
	(uncore)->funcs.mmio_writel = x##_write32; \
} while (0)

#define ASSIGN_READ_MMIO_VFUNCS(uncore, x) \
do { \
	(uncore)->funcs.mmio_readb = x##_read8; \
	(uncore)->funcs.mmio_readw = x##_read16; \
	(uncore)->funcs.mmio_readl = x##_read32; \
	(uncore)->funcs.mmio_readq = x##_read64; \
} while (0)

static void fw_domain_init(struct intel_uncore *uncore,
			   enum forcewake_domain_id domain_id,
			   i915_reg_t reg_set,
			   i915_reg_t reg_ack)
{
	struct intel_uncore_forcewake_domain *d;

	if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
		return;

	d = &uncore->fw_domain[domain_id];

	WARN_ON(d->wake_count);

	WARN_ON(!i915_mmio_reg_valid(reg_set));
	WARN_ON(!i915_mmio_reg_valid(reg_ack));

	d->wake_count = 0;
	d->reg_set = uncore->regs + i915_mmio_reg_offset(reg_set);
	d->reg_ack = uncore->regs + i915_mmio_reg_offset(reg_ack);

	d->id = domain_id;

	BUILD_BUG_ON(FORCEWAKE_RENDER != (1 << FW_DOMAIN_ID_RENDER));
	BUILD_BUG_ON(FORCEWAKE_BLITTER != (1 << FW_DOMAIN_ID_BLITTER));
	BUILD_BUG_ON(FORCEWAKE_MEDIA != (1 << FW_DOMAIN_ID_MEDIA));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX0 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX0));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX1 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX1));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX2 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX2));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX3 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX3));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX0 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX0));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX1 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX1));

	d->mask = BIT(domain_id);

	hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	d->timer.function = intel_uncore_fw_release_timer;

	uncore->fw_domains |= BIT(domain_id);

	fw_domain_reset(d);
}

static void fw_domain_fini(struct intel_uncore *uncore,
			   enum forcewake_domain_id domain_id)
{
	struct intel_uncore_forcewake_domain *d;

	if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
		return;

	d = &uncore->fw_domain[domain_id];

	WARN_ON(d->wake_count);
	WARN_ON(hrtimer_cancel(&d->timer));
	memset(d, 0, sizeof(*d));

	uncore->fw_domains &= ~BIT(domain_id);
}
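/*
 * fw_domain_init() publishes a domain in uncore->fw_domains; domains for
 * engines that later turn out to be fused off are removed again with
 * fw_domain_fini() from intel_uncore_prune_mmio_domains() further down.
 */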
static void intel_uncore_fw_domains_init(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore_to_i915(uncore);

	if (!intel_uncore_has_forcewake(uncore))
		return;

	if (INTEL_GEN(i915) >= 11) {
		int i;

		uncore->funcs.force_wake_get =
			fw_domains_get_with_fallback;
		uncore->funcs.force_wake_put = fw_domains_put;
		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_RENDER_GEN9,
			       FORCEWAKE_ACK_RENDER_GEN9);
		fw_domain_init(uncore, FW_DOMAIN_ID_BLITTER,
			       FORCEWAKE_BLITTER_GEN9,
			       FORCEWAKE_ACK_BLITTER_GEN9);
		for (i = 0; i < I915_MAX_VCS; i++) {
			if (!HAS_ENGINE(i915, _VCS(i)))
				continue;

			fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VDBOX0 + i,
				       FORCEWAKE_MEDIA_VDBOX_GEN11(i),
				       FORCEWAKE_ACK_MEDIA_VDBOX_GEN11(i));
		}
		for (i = 0; i < I915_MAX_VECS; i++) {
			if (!HAS_ENGINE(i915, _VECS(i)))
				continue;

			fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VEBOX0 + i,
				       FORCEWAKE_MEDIA_VEBOX_GEN11(i),
				       FORCEWAKE_ACK_MEDIA_VEBOX_GEN11(i));
		}
	} else if (IS_GEN_RANGE(i915, 9, 10)) {
		uncore->funcs.force_wake_get =
			fw_domains_get_with_fallback;
		uncore->funcs.force_wake_put = fw_domains_put;
		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_RENDER_GEN9,
			       FORCEWAKE_ACK_RENDER_GEN9);
		fw_domain_init(uncore, FW_DOMAIN_ID_BLITTER,
			       FORCEWAKE_BLITTER_GEN9,
			       FORCEWAKE_ACK_BLITTER_GEN9);
		fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
	} else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
		uncore->funcs.force_wake_get = fw_domains_get;
		uncore->funcs.force_wake_put = fw_domains_put;
		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
		fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
		uncore->funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		uncore->funcs.force_wake_put = fw_domains_put;
		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
	} else if (IS_IVYBRIDGE(i915)) {
		u32 ecobus;

		/* IVB configs may use multi-threaded forcewake */

		/* A small trick here - if the bios hasn't configured
		 * MT forcewake, and if the device is in RC6, then
		 * force_wake_mt_get will not wake the device and the
		 * ECOBUS read will return zero, which will be
		 * (correctly) interpreted by the test below as MT
		 * forcewake being disabled.
		 */
		uncore->funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		uncore->funcs.force_wake_put = fw_domains_put;
		/* We need to init first for ECOBUS access and then
		 * determine later if we want to reinit, in case MT access is
		 * not working. In this stage we don't know which flavour this
		 * ivb is, so it is better to reset also the gen6 fw registers
		 * before the ecobus check.
		 */

		__raw_uncore_write32(uncore, FORCEWAKE, 0);
		__raw_posting_read(uncore, ECOBUS);

		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_MT, FORCEWAKE_MT_ACK);

		spin_lock_irq(&uncore->lock);
		fw_domains_get_with_thread_status(uncore, FORCEWAKE_RENDER);
		ecobus = __raw_uncore_read32(uncore, ECOBUS);
		fw_domains_put(uncore, FORCEWAKE_RENDER);
		spin_unlock_irq(&uncore->lock);

		if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
			DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
			DRM_INFO("when using vblank-synced partial screen updates.\n");
			fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
				       FORCEWAKE, FORCEWAKE_ACK);
		}
	} else if (IS_GEN(i915, 6)) {
		uncore->funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		uncore->funcs.force_wake_put = fw_domains_put;
		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE, FORCEWAKE_ACK);
	}

	/* All future platforms are expected to require complex power gating */
	WARN_ON(uncore->fw_domains == 0);
}

#define ASSIGN_FW_DOMAINS_TABLE(uncore, d) \
{ \
	(uncore)->fw_domains_table = \
			(struct intel_forcewake_range *)(d); \
	(uncore)->fw_domains_table_entries = ARRAY_SIZE((d)); \
}

static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
					 unsigned long action, void *data)
{
	struct drm_i915_private *dev_priv = container_of(nb,
			struct drm_i915_private, uncore.pmic_bus_access_nb);

	switch (action) {
	case MBI_PMIC_BUS_ACCESS_BEGIN:
		/*
		 * forcewake all now to make sure that we don't need to do a
		 * forcewake later which on systems where this notifier gets
		 * called requires the punit to access the shared pmic i2c
		 * bus, which will be busy after this notification, leading to:
		 * "render: timed out waiting for forcewake ack request."
		 * errors.
		 *
		 * The notifier is unregistered during intel_runtime_suspend(),
		 * so it's ok to access the HW here without holding a RPM
		 * wake reference -> disable wakeref asserts for the time of
		 * the access.
		 */
		disable_rpm_wakeref_asserts(dev_priv);
		intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
		enable_rpm_wakeref_asserts(dev_priv);
		break;
	case MBI_PMIC_BUS_ACCESS_END:
		intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
		break;
	}

	return NOTIFY_OK;
}
static int uncore_mmio_setup(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore_to_i915(uncore);
	struct pci_dev *pdev = i915->drm.pdev;
	int mmio_bar;
	int mmio_size;

	mmio_bar = IS_GEN(i915, 2) ? 1 : 0;
	/*
	 * Before gen4, the registers and the GTT are behind different BARs.
	 * However, from gen4 onwards, the registers and the GTT are shared
	 * in the same BAR, so we want to restrict this ioremap from
	 * clobbering the GTT, which we want to ioremap_wc instead.
	 * Fortunately, the register BAR remains the same size for all the
	 * earlier generations up to Ironlake.
	 */
	if (INTEL_GEN(i915) < 5)
		mmio_size = 512 * 1024;
	else
		mmio_size = 2 * 1024 * 1024;
	uncore->regs = pci_iomap(pdev, mmio_bar, mmio_size);
	if (uncore->regs == NULL) {
		DRM_ERROR("failed to map registers\n");

		return -EIO;
	}

	return 0;
}

static void uncore_mmio_cleanup(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore_to_i915(uncore);
	struct pci_dev *pdev = i915->drm.pdev;

	pci_iounmap(pdev, uncore->regs);
}

void intel_uncore_init_early(struct intel_uncore *uncore)
{
	spin_lock_init(&uncore->lock);
}

int intel_uncore_init_mmio(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore_to_i915(uncore);
	int ret;

	ret = uncore_mmio_setup(uncore);
	if (ret)
		return ret;

	i915_check_vgpu(i915);

	if (INTEL_GEN(i915) > 5 && !intel_vgpu_active(i915))
		uncore->flags |= UNCORE_HAS_FORCEWAKE;

	intel_uncore_fw_domains_init(uncore);
	__intel_uncore_early_sanitize(uncore, 0);

	uncore->unclaimed_mmio_check = 1;
	uncore->pmic_bus_access_nb.notifier_call =
		i915_pmic_bus_access_notifier;

	uncore->rpm = &i915->runtime_pm;

	if (!intel_uncore_has_forcewake(uncore)) {
		if (IS_GEN(i915, 5)) {
			ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen5);
			ASSIGN_READ_MMIO_VFUNCS(uncore, gen5);
		} else {
			ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen2);
			ASSIGN_READ_MMIO_VFUNCS(uncore, gen2);
		}
	} else if (IS_GEN_RANGE(i915, 6, 7)) {
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen6);

		if (IS_VALLEYVIEW(i915)) {
			ASSIGN_FW_DOMAINS_TABLE(uncore, __vlv_fw_ranges);
			ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
		} else {
			ASSIGN_READ_MMIO_VFUNCS(uncore, gen6);
		}
	} else if (IS_GEN(i915, 8)) {
		if (IS_CHERRYVIEW(i915)) {
			ASSIGN_FW_DOMAINS_TABLE(uncore, __chv_fw_ranges);
			ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
			ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);

		} else {
			ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen8);
			ASSIGN_READ_MMIO_VFUNCS(uncore, gen6);
		}
	} else if (IS_GEN_RANGE(i915, 9, 10)) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen9_fw_ranges);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
		ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
	} else {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen11_fw_ranges);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen11_fwtable);
		ASSIGN_READ_MMIO_VFUNCS(uncore, gen11_fwtable);
	}

	if (HAS_FPGA_DBG_UNCLAIMED(i915))
		uncore->flags |= UNCORE_HAS_FPGA_DBG_UNCLAIMED;

	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
		uncore->flags |= UNCORE_HAS_DBG_UNCLAIMED;

	if (IS_GEN_RANGE(i915, 6, 7))
		uncore->flags |= UNCORE_HAS_FIFO;

	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);

	return 0;
}
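/*
 * Note: the forcewake range and shadow tables picked above are validated by
 * the selftests included at the end of this file; see the
 * intel_fw_table_check()/intel_shadow_table_check() references in the table
 * comments.
 */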
/*
 * We might have detected that some engines are fused off after we initialized
 * the forcewake domains. Prune them, to make sure they only reference existing
 * engines.
 */
void intel_uncore_prune_mmio_domains(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore_to_i915(uncore);

	if (INTEL_GEN(i915) >= 11) {
		enum forcewake_domains fw_domains = uncore->fw_domains;
		enum forcewake_domain_id domain_id;
		int i;

		for (i = 0; i < I915_MAX_VCS; i++) {
			domain_id = FW_DOMAIN_ID_MEDIA_VDBOX0 + i;

			if (HAS_ENGINE(i915, _VCS(i)))
				continue;

			if (fw_domains & BIT(domain_id))
				fw_domain_fini(uncore, domain_id);
		}

		for (i = 0; i < I915_MAX_VECS; i++) {
			domain_id = FW_DOMAIN_ID_MEDIA_VEBOX0 + i;

			if (HAS_ENGINE(i915, _VECS(i)))
				continue;

			if (fw_domains & BIT(domain_id))
				fw_domain_fini(uncore, domain_id);
		}
	}
}

void intel_uncore_fini_mmio(struct intel_uncore *uncore)
{
	/* Paranoia: make sure we have disabled everything before we exit. */
	intel_uncore_sanitize(uncore_to_i915(uncore));

	iosf_mbi_punit_acquire();
	iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
		&uncore->pmic_bus_access_nb);
	intel_uncore_forcewake_reset(uncore);
	iosf_mbi_punit_release();
	uncore_mmio_cleanup(uncore);
}

static const struct reg_whitelist {
	i915_reg_t offset_ldw;
	i915_reg_t offset_udw;
	u16 gen_mask;
	u8 size;
} reg_read_whitelist[] = { {
	.offset_ldw = RING_TIMESTAMP(RENDER_RING_BASE),
	.offset_udw = RING_TIMESTAMP_UDW(RENDER_RING_BASE),
	.gen_mask = INTEL_GEN_MASK(4, 11),
	.size = 8
} };

int i915_reg_read_ioctl(struct drm_device *dev,
			void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_reg_read *reg = data;
	struct reg_whitelist const *entry;
	intel_wakeref_t wakeref;
	unsigned int flags;
	int remain;
	int ret = 0;

	entry = reg_read_whitelist;
	remain = ARRAY_SIZE(reg_read_whitelist);
	while (remain) {
		u32 entry_offset = i915_mmio_reg_offset(entry->offset_ldw);

		GEM_BUG_ON(!is_power_of_2(entry->size));
		GEM_BUG_ON(entry->size > 8);
		GEM_BUG_ON(entry_offset & (entry->size - 1));

		if (INTEL_INFO(dev_priv)->gen_mask & entry->gen_mask &&
		    entry_offset == (reg->offset & -entry->size))
			break;
		entry++;
		remain--;
	}

	if (!remain)
		return -EINVAL;

	flags = reg->offset & (entry->size - 1);

	with_intel_runtime_pm(dev_priv, wakeref) {
		if (entry->size == 8 && flags == I915_REG_READ_8B_WA)
			reg->val = I915_READ64_2x32(entry->offset_ldw,
						    entry->offset_udw);
		else if (entry->size == 8 && flags == 0)
			reg->val = I915_READ64(entry->offset_ldw);
		else if (entry->size == 4 && flags == 0)
			reg->val = I915_READ(entry->offset_ldw);
		else if (entry->size == 2 && flags == 0)
			reg->val = I915_READ16(entry->offset_ldw);
		else if (entry->size == 1 && flags == 0)
			reg->val = I915_READ8(entry->offset_ldw);
		else
			ret = -EINVAL;
	}

	return ret;
}
/**
 * __intel_wait_for_register_fw - wait until register matches expected state
 * @uncore: the struct intel_uncore
 * @reg: the register to read
 * @mask: mask to apply to register value
 * @value: expected value
 * @fast_timeout_us: fast timeout in microsecond for atomic/tight wait
 * @slow_timeout_ms: slow timeout in millisecond
 * @out_value: optional placeholder to hold register value
 *
 * This routine waits until the target register @reg contains the expected
 * @value after applying the @mask, i.e. it waits until ::
 *
 *     (I915_READ_FW(reg) & mask) == value
 *
 * Otherwise, the wait will timeout after @slow_timeout_ms milliseconds.
 * For atomic context @slow_timeout_ms must be zero and @fast_timeout_us
 * must be not larger than 20,000 microseconds.
 *
 * Note that this routine assumes the caller holds forcewake asserted, it is
 * not suitable for very long waits. See intel_wait_for_register() if you
 * wish to wait without holding forcewake for the duration (i.e. you expect
 * the wait to be slow).
 *
 * Returns 0 if the register matches the desired condition, or -ETIMEDOUT.
 */
int __intel_wait_for_register_fw(struct intel_uncore *uncore,
				 i915_reg_t reg,
				 u32 mask,
				 u32 value,
				 unsigned int fast_timeout_us,
				 unsigned int slow_timeout_ms,
				 u32 *out_value)
{
	u32 uninitialized_var(reg_value);
#define done (((reg_value = intel_uncore_read_fw(uncore, reg)) & mask) == value)
	int ret;

	/* Catch any overuse of this function */
	might_sleep_if(slow_timeout_ms);
	GEM_BUG_ON(fast_timeout_us > 20000);

	ret = -ETIMEDOUT;
	if (fast_timeout_us && fast_timeout_us <= 20000)
		ret = _wait_for_atomic(done, fast_timeout_us, 0);
	if (ret && slow_timeout_ms)
		ret = wait_for(done, slow_timeout_ms);

	if (out_value)
		*out_value = reg_value;

	return ret;
#undef done
}
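/*
 * Illustrative call (values invented): poll for up to 100us, with forcewake
 * already held by the caller, until the masked bit clears:
 *
 *	err = __intel_wait_for_register_fw(uncore, reg, BIT(0), 0,
 *					   100, 0, NULL);
 */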
/**
 * __intel_wait_for_register - wait until register matches expected state
 * @uncore: the struct intel_uncore
 * @reg: the register to read
 * @mask: mask to apply to register value
 * @value: expected value
 * @fast_timeout_us: fast timeout in microsecond for atomic/tight wait
 * @slow_timeout_ms: slow timeout in millisecond
 * @out_value: optional placeholder to hold register value
 *
 * This routine waits until the target register @reg contains the expected
 * @value after applying the @mask, i.e. it waits until ::
 *
 *     (I915_READ(reg) & mask) == value
 *
 * Otherwise, the wait will timeout after @slow_timeout_ms milliseconds.
 *
 * Returns 0 if the register matches the desired condition, or -ETIMEDOUT.
 */
int __intel_wait_for_register(struct intel_uncore *uncore,
			      i915_reg_t reg,
			      u32 mask,
			      u32 value,
			      unsigned int fast_timeout_us,
			      unsigned int slow_timeout_ms,
			      u32 *out_value)
{
	unsigned fw =
		intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ);
	u32 reg_value;
	int ret;

	might_sleep_if(slow_timeout_ms);

	spin_lock_irq(&uncore->lock);
	intel_uncore_forcewake_get__locked(uncore, fw);

	ret = __intel_wait_for_register_fw(uncore,
					   reg, mask, value,
					   fast_timeout_us, 0, &reg_value);

	intel_uncore_forcewake_put__locked(uncore, fw);
	spin_unlock_irq(&uncore->lock);

	if (ret && slow_timeout_ms)
		ret = __wait_for(reg_value = intel_uncore_read_notrace(uncore,
								       reg),
				 (reg_value & mask) == value,
				 slow_timeout_ms * 1000, 10, 1000);

	/* just trace the final value */
	trace_i915_reg_rw(false, reg, reg_value, sizeof(reg_value), true);

	if (out_value)
		*out_value = reg_value;

	return ret;
}

bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore)
{
	return check_for_unclaimed_mmio(uncore);
}

bool
intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore)
{
	bool ret = false;

	spin_lock_irq(&uncore->lock);

	if (unlikely(uncore->unclaimed_mmio_check <= 0))
		goto out;

	if (unlikely(intel_uncore_unclaimed_mmio(uncore))) {
		if (!i915_modparams.mmio_debug) {
			DRM_DEBUG("Unclaimed register detected, "
				  "enabling oneshot unclaimed register reporting. "
				  "Please use i915.mmio_debug=N for more information.\n");
			i915_modparams.mmio_debug++;
		}
		uncore->unclaimed_mmio_check--;
		ret = true;
	}

out:
	spin_unlock_irq(&uncore->lock);

	return ret;
}

static enum forcewake_domains
intel_uncore_forcewake_for_read(struct intel_uncore *uncore,
				i915_reg_t reg)
{
	struct drm_i915_private *i915 = uncore_to_i915(uncore);
	u32 offset = i915_mmio_reg_offset(reg);
	enum forcewake_domains fw_domains;

	if (INTEL_GEN(i915) >= 11) {
		fw_domains = __gen11_fwtable_reg_read_fw_domains(uncore, offset);
	} else if (HAS_FWTABLE(i915)) {
		fw_domains = __fwtable_reg_read_fw_domains(uncore, offset);
	} else if (INTEL_GEN(i915) >= 6) {
		fw_domains = __gen6_reg_read_fw_domains(uncore, offset);
	} else {
		/* on devices with FW we expect to hit one of the above cases */
		if (intel_uncore_has_forcewake(uncore))
			MISSING_CASE(INTEL_GEN(i915));

		fw_domains = 0;
	}

	WARN_ON(fw_domains & ~uncore->fw_domains);

	return fw_domains;
}
static enum forcewake_domains
intel_uncore_forcewake_for_write(struct intel_uncore *uncore,
				 i915_reg_t reg)
{
	struct drm_i915_private *i915 = uncore_to_i915(uncore);
	u32 offset = i915_mmio_reg_offset(reg);
	enum forcewake_domains fw_domains;

	if (INTEL_GEN(i915) >= 11) {
		fw_domains = __gen11_fwtable_reg_write_fw_domains(uncore, offset);
	} else if (HAS_FWTABLE(i915) && !IS_VALLEYVIEW(i915)) {
		fw_domains = __fwtable_reg_write_fw_domains(uncore, offset);
	} else if (IS_GEN(i915, 8)) {
		fw_domains = __gen8_reg_write_fw_domains(uncore, offset);
	} else if (IS_GEN_RANGE(i915, 6, 7)) {
		fw_domains = FORCEWAKE_RENDER;
	} else {
		/* on devices with FW we expect to hit one of the above cases */
		if (intel_uncore_has_forcewake(uncore))
			MISSING_CASE(INTEL_GEN(i915));

		fw_domains = 0;
	}

	WARN_ON(fw_domains & ~uncore->fw_domains);

	return fw_domains;
}

/**
 * intel_uncore_forcewake_for_reg - which forcewake domains are needed to access
 * a register
 * @uncore: pointer to struct intel_uncore
 * @reg: register in question
 * @op: operation bitmask of FW_REG_READ and/or FW_REG_WRITE
 *
 * Returns a set of forcewake domains required to be taken with for example
 * intel_uncore_forcewake_get for the specified register to be accessible in the
 * specified mode (read, write or read/write) with raw mmio accessors.
 *
 * NOTE: On Gen6 and Gen7 write forcewake domain (FORCEWAKE_RENDER) requires the
 * callers to do FIFO management on their own or risk losing writes.
 */
enum forcewake_domains
intel_uncore_forcewake_for_reg(struct intel_uncore *uncore,
			       i915_reg_t reg, unsigned int op)
{
	enum forcewake_domains fw_domains = 0;

	WARN_ON(!op);

	if (!intel_uncore_has_forcewake(uncore))
		return 0;

	if (op & FW_REG_READ)
		fw_domains = intel_uncore_forcewake_for_read(uncore, reg);

	if (op & FW_REG_WRITE)
		fw_domains |= intel_uncore_forcewake_for_write(uncore, reg);

	return fw_domains;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_uncore.c"
#include "selftests/intel_uncore.c"
#endif