// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include <linux/string_helpers.h>

#include <drm/intel/i915_drm.h>
#include <drm/intel/display_parent_interface.h>
#include <drm/intel/intel_pcode_regs.h>

#include "display/intel_display_rps.h"
#include "display/vlv_clock.h"

#include "i915_drv.h"
#include "i915_freq.h"
#include "i915_irq.h"
#include "i915_reg.h"
#include "i915_wait_util.h"
#include "intel_breadcrumbs.h"
#include "intel_gt.h"
#include "intel_gt_clock_utils.h"
#include "intel_gt_irq.h"
#include "intel_gt_pm.h"
#include "intel_gt_pm_irq.h"
#include "intel_gt_print.h"
#include "intel_gt_regs.h"
#include "intel_mchbar_regs.h"
#include "intel_pcode.h"
#include "intel_rps.h"
#include "vlv_iosf_sb.h"
#include "../../../platform/x86/intel_ips.h"

#define BUSY_MAX_EI 20u /* ms */

/*
 * Lock protecting IPS related data structures
 */
static DEFINE_SPINLOCK(mchdev_lock);

static struct intel_gt *rps_to_gt(struct intel_rps *rps)
{
	return container_of(rps, struct intel_gt, rps);
}

static struct drm_i915_private *rps_to_i915(struct intel_rps *rps)
{
	return rps_to_gt(rps)->i915;
}

static struct intel_uncore *rps_to_uncore(struct intel_rps *rps)
{
	return rps_to_gt(rps)->uncore;
}

static struct intel_guc_slpc *rps_to_slpc(struct intel_rps *rps)
{
	struct intel_gt *gt = rps_to_gt(rps);

	return &gt_to_guc(gt)->slpc;
}

static bool rps_uses_slpc(struct intel_rps *rps)
{
	struct intel_gt *gt = rps_to_gt(rps);

	return intel_uc_uses_guc_slpc(&gt->uc);
}

static u32 rps_pm_sanitize_mask(struct intel_rps *rps, u32 mask)
{
	return mask & ~rps->pm_intrmsk_mbz;
}

static void set(struct intel_uncore *uncore, i915_reg_t reg, u32 val)
{
	intel_uncore_write_fw(uncore, reg, val);
}

static void rps_timer(struct timer_list *t)
{
	struct intel_rps *rps = timer_container_of(rps, t, timer);
	struct intel_gt *gt = rps_to_gt(rps);
	struct intel_engine_cs *engine;
	ktime_t dt, last, timestamp;
	enum intel_engine_id id;
	s64 max_busy[3] = {};

	timestamp = 0;
	for_each_engine(engine, gt, id) {
		s64 busy;
		int i;

		dt = intel_engine_get_busy_time(engine, &timestamp);
		last = engine->stats.rps;
		engine->stats.rps = dt;

		busy = ktime_to_ns(ktime_sub(dt, last));
		for (i = 0; i < ARRAY_SIZE(max_busy); i++) {
			if (busy > max_busy[i])
				swap(busy, max_busy[i]);
		}
	}
	last = rps->pm_timestamp;
	rps->pm_timestamp = timestamp;

	if (intel_rps_is_active(rps)) {
		s64 busy;
		int i;

		dt = ktime_sub(timestamp, last);

		/*
		 * Our goal is to evaluate each engine independently, so we run
		 * at the lowest clocks required to sustain the heaviest
		 * workload. However, a task may be split into sequential
		 * dependent operations across a set of engines, such that
		 * the independent contributions do not account for high load,
		 * but overall the task is GPU bound. For example, consider
		 * video decode on vcs followed by colour post-processing
		 * on vecs, followed by general post-processing on rcs.
		 * Since multiple engines being active does not necessarily
		 * imply a single continuous workload across all engines, we
		 * hedge our bets by only contributing a factor of the
		 * distributed load into our busyness calculation.
		 */
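		/*
		 * Illustrative numbers: with max_busy = { 8ms, 4ms, 2ms },
		 * the weighted sum below comes to 8 + 4/2 + 2/4 = 11ms of
		 * busyness over this evaluation interval.
		 */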
		busy = max_busy[0];
		for (i = 1; i < ARRAY_SIZE(max_busy); i++) {
			if (!max_busy[i])
				break;

			busy += div_u64(max_busy[i], 1 << i);
		}
		GT_TRACE(gt,
			 "busy:%lld [%d%%], max:[%lld, %lld, %lld], interval:%d\n",
			 busy, (int)div64_u64(100 * busy, dt),
			 max_busy[0], max_busy[1], max_busy[2],
			 rps->pm_interval);

		if (100 * busy > rps->power.up_threshold * dt &&
		    rps->cur_freq < rps->max_freq_softlimit) {
			rps->pm_iir |= GEN6_PM_RP_UP_THRESHOLD;
			rps->pm_interval = 1;
			queue_work(gt->i915->unordered_wq, &rps->work);
		} else if (100 * busy < rps->power.down_threshold * dt &&
			   rps->cur_freq > rps->min_freq_softlimit) {
			rps->pm_iir |= GEN6_PM_RP_DOWN_THRESHOLD;
			rps->pm_interval = 1;
			queue_work(gt->i915->unordered_wq, &rps->work);
		} else {
			rps->last_adj = 0;
		}

		mod_timer(&rps->timer,
			  jiffies + msecs_to_jiffies(rps->pm_interval));
		rps->pm_interval = min(rps->pm_interval * 2, BUSY_MAX_EI);
	}
}

static void rps_start_timer(struct intel_rps *rps)
{
	rps->pm_timestamp = ktime_sub(ktime_get(), rps->pm_timestamp);
	rps->pm_interval = 1;
	mod_timer(&rps->timer, jiffies + 1);
}

static void rps_stop_timer(struct intel_rps *rps)
{
	timer_delete_sync(&rps->timer);
	rps->pm_timestamp = ktime_sub(ktime_get(), rps->pm_timestamp);
	cancel_work_sync(&rps->work);
}

static u32 rps_pm_mask(struct intel_rps *rps, u8 val)
{
	u32 mask = 0;

	/* We use UP_EI_EXPIRED interrupts for both up/down in manual mode */
	if (val > rps->min_freq_softlimit)
		mask |= (GEN6_PM_RP_UP_EI_EXPIRED |
			 GEN6_PM_RP_DOWN_THRESHOLD |
			 GEN6_PM_RP_DOWN_TIMEOUT);

	if (val < rps->max_freq_softlimit)
		mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;

	mask &= rps->pm_events;

	return rps_pm_sanitize_mask(rps, ~mask);
}
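
/*
 * For example, at val == max_freq_softlimit only the "down" events are left
 * unmasked, and at val == min_freq_softlimit only the "up" events are, so we
 * never take an interrupt asking us to move beyond the soft limits.
 * (UP_EI_EXPIRED is kept for both directions, as noted above.)
 */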

static void rps_reset_ei(struct intel_rps *rps)
{
	memset(&rps->ei, 0, sizeof(rps->ei));
}

static void rps_enable_interrupts(struct intel_rps *rps)
{
	struct intel_gt *gt = rps_to_gt(rps);

	GEM_BUG_ON(rps_uses_slpc(rps));

	GT_TRACE(gt, "interrupts:on rps->pm_events: %x, rps_pm_mask:%x\n",
		 rps->pm_events, rps_pm_mask(rps, rps->last_freq));

	rps_reset_ei(rps);

	spin_lock_irq(gt->irq_lock);
	gen6_gt_pm_enable_irq(gt, rps->pm_events);
	spin_unlock_irq(gt->irq_lock);

	intel_uncore_write(gt->uncore,
			   GEN6_PMINTRMSK, rps_pm_mask(rps, rps->last_freq));
}

static void gen6_rps_reset_interrupts(struct intel_rps *rps)
{
	gen6_gt_pm_reset_iir(rps_to_gt(rps), GEN6_PM_RPS_EVENTS);
}

static void gen11_rps_reset_interrupts(struct intel_rps *rps)
{
	while (gen11_gt_reset_one_iir(rps_to_gt(rps), 0, GEN11_GTPM))
		;
}

static void rps_reset_interrupts(struct intel_rps *rps)
{
	struct intel_gt *gt = rps_to_gt(rps);

	spin_lock_irq(gt->irq_lock);
	if (GRAPHICS_VER(gt->i915) >= 11)
		gen11_rps_reset_interrupts(rps);
	else
		gen6_rps_reset_interrupts(rps);

	rps->pm_iir = 0;
	spin_unlock_irq(gt->irq_lock);
}

static void rps_disable_interrupts(struct intel_rps *rps)
{
	struct intel_gt *gt = rps_to_gt(rps);

	intel_uncore_write(gt->uncore,
			   GEN6_PMINTRMSK, rps_pm_sanitize_mask(rps, ~0u));

	spin_lock_irq(gt->irq_lock);
	gen6_gt_pm_disable_irq(gt, GEN6_PM_RPS_EVENTS);
	spin_unlock_irq(gt->irq_lock);

	intel_synchronize_irq(gt->i915);

	/*
	 * Now that we will not be generating any more work, flush any
	 * outstanding tasks. As we are called on the RPS idle path,
	 * we will reset the GPU to minimum frequencies, so the current
	 * state of the worker can be discarded.
	 */
	cancel_work_sync(&rps->work);

	rps_reset_interrupts(rps);
	GT_TRACE(gt, "interrupts:off\n");
}

static const struct cparams {
	u16 i;
	u16 t;
	u16 m;
	u16 c;
} cparams[] = {
	{ 1, 1333, 301, 28664 },
	{ 1, 1067, 294, 24460 },
	{ 1, 800, 294, 25192 },
	{ 0, 1333, 276, 27605 },
	{ 0, 1067, 276, 27605 },
	{ 0, 800, 231, 23784 },
};

static void gen5_rps_init(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	struct intel_uncore *uncore = rps_to_uncore(rps);
	unsigned int fsb_freq, mem_freq;
	u8 fmax, fmin, fstart;
	u32 rgvmodectl;
	int c_m, i;

	fsb_freq = ilk_fsb_freq(i915);
	mem_freq = ilk_mem_freq(i915);

	if (fsb_freq <= 3200000)
		c_m = 0;
	else if (fsb_freq <= 4800000)
		c_m = 1;
	else
		c_m = 2;

	for (i = 0; i < ARRAY_SIZE(cparams); i++) {
		if (cparams[i].i == c_m &&
		    cparams[i].t == DIV_ROUND_CLOSEST(mem_freq, 1000)) {
			rps->ips.m = cparams[i].m;
			rps->ips.c = cparams[i].c;
			break;
		}
	}

	rgvmodectl = intel_uncore_read(uncore, MEMMODECTL);

	/* Set up min, max, and cur for interrupt handling */
	fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
	fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
	fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
		MEMMODE_FSTART_SHIFT;
	drm_dbg(&i915->drm, "fmax: %d, fmin: %d, fstart: %d\n",
		fmax, fmin, fstart);

	/*
	 * The hw bins are inverted (a higher bin is a slower clock), so
	 * hw fmax supplies our min_freq and hw fmin our max_freq; see
	 * gen5_invert_freq().
	 */
	rps->min_freq = fmax;
	rps->efficient_freq = fstart;
	rps->max_freq = fmin;
}
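
/*
 * A sketch of the estimate below: chipset power is derived as
 * (m * d(energy)/dt + c) / 10, with m and c taken from the cparams row
 * matched in gen5_rps_init() above.
 */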
339 */ 340 dt = now - ips->last_time1; 341 if (dt <= 10) 342 return ips->chipset_power; 343 344 /* FIXME: handle per-counter overflow */ 345 total = intel_uncore_read(uncore, DMIEC); 346 total += intel_uncore_read(uncore, DDREC); 347 total += intel_uncore_read(uncore, CSIEC); 348 349 delta = total - ips->last_count1; 350 351 result = div_u64(div_u64(ips->m * delta, dt) + ips->c, 10); 352 353 ips->last_count1 = total; 354 ips->last_time1 = now; 355 356 ips->chipset_power = result; 357 358 return result; 359 } 360 361 static unsigned long ips_mch_val(struct intel_uncore *uncore) 362 { 363 unsigned int m, x, b; 364 u32 tsfs; 365 366 tsfs = intel_uncore_read(uncore, TSFS); 367 x = intel_uncore_read8(uncore, TR1); 368 369 b = tsfs & TSFS_INTR_MASK; 370 m = (tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT; 371 372 return m * x / 127 - b; 373 } 374 375 static int _pxvid_to_vd(u8 pxvid) 376 { 377 if (pxvid == 0) 378 return 0; 379 380 if (pxvid >= 8 && pxvid < 31) 381 pxvid = 31; 382 383 return (pxvid + 2) * 125; 384 } 385 386 static u32 pvid_to_extvid(struct drm_i915_private *i915, u8 pxvid) 387 { 388 const int vd = _pxvid_to_vd(pxvid); 389 390 if (INTEL_INFO(i915)->is_mobile) 391 return max(vd - 1125, 0); 392 393 return vd; 394 } 395 396 static void __gen5_ips_update(struct intel_ips *ips) 397 { 398 struct intel_uncore *uncore = 399 rps_to_uncore(container_of(ips, struct intel_rps, ips)); 400 u64 now, delta, dt; 401 u32 count; 402 403 lockdep_assert_held(&mchdev_lock); 404 405 now = ktime_get_raw_ns(); 406 dt = now - ips->last_time2; 407 do_div(dt, NSEC_PER_MSEC); 408 409 /* Don't divide by 0 */ 410 if (dt <= 10) 411 return; 412 413 count = intel_uncore_read(uncore, GFXEC); 414 delta = count - ips->last_count2; 415 416 ips->last_count2 = count; 417 ips->last_time2 = now; 418 419 /* More magic constants... 

static void __gen5_ips_update(struct intel_ips *ips)
{
	struct intel_uncore *uncore =
		rps_to_uncore(container_of(ips, struct intel_rps, ips));
	u64 now, delta, dt;
	u32 count;

	lockdep_assert_held(&mchdev_lock);

	now = ktime_get_raw_ns();
	dt = now - ips->last_time2;
	do_div(dt, NSEC_PER_MSEC);

	/* Don't divide by 0 */
	if (dt <= 10)
		return;

	count = intel_uncore_read(uncore, GFXEC);
	delta = count - ips->last_count2;

	ips->last_count2 = count;
	ips->last_time2 = now;

	/* More magic constants... */
	ips->gfx_power = div_u64(delta * 1181, dt * 10);
}

static void gen5_rps_update(struct intel_rps *rps)
{
	spin_lock_irq(&mchdev_lock);
	__gen5_ips_update(&rps->ips);
	spin_unlock_irq(&mchdev_lock);
}

static unsigned int gen5_invert_freq(struct intel_rps *rps,
				     unsigned int val)
{
	/* Invert the frequency bin into an ips delay */
	val = rps->max_freq - val;
	val = rps->min_freq + val;

	return val;
}
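
/*
 * For example, with min_freq 5 and max_freq 16, val 16 maps to delay 5 and
 * val 5 maps to delay 16: the hw delay scale runs opposite to the driver's
 * frequency scale.
 */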

static int __gen5_rps_set(struct intel_rps *rps, u8 val)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);
	u16 rgvswctl;

	lockdep_assert_held(&mchdev_lock);

	rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
	if (rgvswctl & MEMCTL_CMD_STS) {
		drm_dbg(&rps_to_i915(rps)->drm,
			"gpu busy, RCS change rejected\n");
		return -EBUSY; /* still busy with another command */
	}

	/* Invert the frequency bin into an ips delay */
	val = gen5_invert_freq(rps, val);

	rgvswctl =
		(MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
		(val << MEMCTL_FREQ_SHIFT) |
		MEMCTL_SFCAVM;
	intel_uncore_write16(uncore, MEMSWCTL, rgvswctl);
	intel_uncore_posting_read16(uncore, MEMSWCTL);

	rgvswctl |= MEMCTL_CMD_STS;
	intel_uncore_write16(uncore, MEMSWCTL, rgvswctl);

	return 0;
}

static int gen5_rps_set(struct intel_rps *rps, u8 val)
{
	int err;

	spin_lock_irq(&mchdev_lock);
	err = __gen5_rps_set(rps, val);
	spin_unlock_irq(&mchdev_lock);

	return err;
}

static unsigned long intel_pxfreq(u32 vidfreq)
{
	int div = (vidfreq & 0x3f0000) >> 16;
	int post = (vidfreq & 0x3000) >> 12;
	int pre = (vidfreq & 0x7);

	if (!pre)
		return 0;

	return div * 133333 / (pre << post);
}

static unsigned int init_emon(struct intel_uncore *uncore)
{
	u8 pxw[16];
	int i;

	/* Disable to program */
	intel_uncore_write(uncore, ECR, 0);
	intel_uncore_posting_read(uncore, ECR);

	/* Program energy weights for various events */
	intel_uncore_write(uncore, SDEW, 0x15040d00);
	intel_uncore_write(uncore, CSIEW0, 0x007f0000);
	intel_uncore_write(uncore, CSIEW1, 0x1e220004);
	intel_uncore_write(uncore, CSIEW2, 0x04000004);

	for (i = 0; i < 5; i++)
		intel_uncore_write(uncore, PEW(i), 0);
	for (i = 0; i < 3; i++)
		intel_uncore_write(uncore, DEW(i), 0);

	/* Program P-state weights to account for frequency power adjustment */
	for (i = 0; i < 16; i++) {
		u32 pxvidfreq = intel_uncore_read(uncore, PXVFREQ(i));
		unsigned int freq = intel_pxfreq(pxvidfreq);
		unsigned int vid =
			(pxvidfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT;
		unsigned int val;

		val = vid * vid * freq / 1000 * 255;
		val /= 127 * 127 * 900;

		pxw[i] = val;
	}
	/* Render standby states get 0 weight */
	pxw[14] = 0;
	pxw[15] = 0;

	for (i = 0; i < 4; i++) {
		intel_uncore_write(uncore, PXW(i),
				   pxw[i * 4 + 0] << 24 |
				   pxw[i * 4 + 1] << 16 |
				   pxw[i * 4 + 2] << 8 |
				   pxw[i * 4 + 3] << 0);
	}

	/* Adjust magic regs to magic values (more experimental results) */
	intel_uncore_write(uncore, OGW0, 0);
	intel_uncore_write(uncore, OGW1, 0);
	intel_uncore_write(uncore, EG0, 0x00007f00);
	intel_uncore_write(uncore, EG1, 0x0000000e);
	intel_uncore_write(uncore, EG2, 0x000e0000);
	intel_uncore_write(uncore, EG3, 0x68000300);
	intel_uncore_write(uncore, EG4, 0x42000000);
	intel_uncore_write(uncore, EG5, 0x00140031);
	intel_uncore_write(uncore, EG6, 0);
	intel_uncore_write(uncore, EG7, 0);

	for (i = 0; i < 8; i++)
		intel_uncore_write(uncore, PXWL(i), 0);

	/* Enable PMON + select events */
	intel_uncore_write(uncore, ECR, 0x80000019);

	return intel_uncore_read(uncore, LCFUSE02) & LCFUSE_HIV_MASK;
}

static bool gen5_rps_enable(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	struct intel_display *display = i915->display;
	struct intel_uncore *uncore = rps_to_uncore(rps);
	u8 fstart, vstart;
	u32 rgvmodectl;

	spin_lock_irq(&mchdev_lock);

	rgvmodectl = intel_uncore_read(uncore, MEMMODECTL);

	/* Enable temp reporting */
	intel_uncore_write16(uncore, PMMISC,
			     intel_uncore_read16(uncore, PMMISC) | MCPPCE_EN);
	intel_uncore_write16(uncore, TSC1,
			     intel_uncore_read16(uncore, TSC1) | TSE);

	/* 100ms RC evaluation intervals */
	intel_uncore_write(uncore, RCUPEI, 100000);
	intel_uncore_write(uncore, RCDNEI, 100000);

	/* Set max/min thresholds to 90ms and 80ms respectively */
	intel_uncore_write(uncore, RCBMAXAVG, 90000);
	intel_uncore_write(uncore, RCBMINAVG, 80000);

	intel_uncore_write(uncore, MEMIHYST, 1);

	/* Set up min, max, and cur for interrupt handling */
	fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
		MEMMODE_FSTART_SHIFT;

	vstart = (intel_uncore_read(uncore, PXVFREQ(fstart)) &
		  PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT;

	intel_uncore_write(uncore,
			   MEMINTREN,
			   MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);

	intel_uncore_write(uncore, VIDSTART, vstart);
	intel_uncore_posting_read(uncore, VIDSTART);

	rgvmodectl |= MEMMODE_SWMODE_EN;
	intel_uncore_write(uncore, MEMMODECTL, rgvmodectl);

	if (wait_for_atomic((intel_uncore_read(uncore, MEMSWCTL) &
			     MEMCTL_CMD_STS) == 0, 10))
		drm_err(&uncore->i915->drm,
			"stuck trying to change perf mode\n");
	mdelay(1);

	__gen5_rps_set(rps, rps->cur_freq);

	rps->ips.last_count1 = intel_uncore_read(uncore, DMIEC);
	rps->ips.last_count1 += intel_uncore_read(uncore, DDREC);
	rps->ips.last_count1 += intel_uncore_read(uncore, CSIEC);
	rps->ips.last_time1 = jiffies_to_msecs(jiffies);

	rps->ips.last_count2 = intel_uncore_read(uncore, GFXEC);
	rps->ips.last_time2 = ktime_get_raw_ns();

	ilk_display_rps_enable(display);

	spin_unlock_irq(&mchdev_lock);

	rps->ips.corr = init_emon(uncore);

	return true;
}

static void gen5_rps_disable(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	struct intel_display *display = i915->display;
	struct intel_uncore *uncore = rps_to_uncore(rps);
	u16 rgvswctl;

	spin_lock_irq(&mchdev_lock);

	ilk_display_rps_disable(display);

	rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);

	/* Ack interrupts, disable EFC interrupt */
	intel_uncore_rmw(uncore, MEMINTREN, MEMINT_EVAL_CHG_EN, 0);
	intel_uncore_write(uncore, MEMINTRSTS, MEMINT_EVAL_CHG);

	/* Go back to the starting frequency */
	__gen5_rps_set(rps, rps->idle_freq);
	mdelay(1);
	rgvswctl |= MEMCTL_CMD_STS;
	intel_uncore_write(uncore, MEMSWCTL, rgvswctl);
	mdelay(1);

	spin_unlock_irq(&mchdev_lock);
}

static u32 rps_limits(struct intel_rps *rps, u8 val)
{
	u32 limits;

	/*
	 * Only set the down limit when we've reached the lowest level to avoid
	 * getting more interrupts, otherwise leave this clear. This prevents a
	 * race in the hw when coming out of rc6: There's a tiny window where
	 * the hw runs at the minimal clock before selecting the desired
	 * frequency, if the down threshold expires in that window we will not
	 * receive a down interrupt.
	 */
	if (GRAPHICS_VER(rps_to_i915(rps)) >= 9) {
		limits = rps->max_freq_softlimit << 23;
		if (val <= rps->min_freq_softlimit)
			limits |= rps->min_freq_softlimit << 14;
	} else {
		limits = rps->max_freq_softlimit << 24;
		if (val <= rps->min_freq_softlimit)
			limits |= rps->min_freq_softlimit << 16;
	}

	return limits;
}
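
/*
 * The value built above is later written to GEN6_RP_INTERRUPT_LIMITS (see
 * intel_rps_set()): gen9+ packs the max limit at bit 23 and the min limit
 * at bit 14, earlier gens at bits 24 and 16 respectively.
 */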

static void rps_set_power(struct intel_rps *rps, int new_power)
{
	struct intel_gt *gt = rps_to_gt(rps);
	struct intel_uncore *uncore = gt->uncore;
	u32 ei_up = 0, ei_down = 0;

	lockdep_assert_held(&rps->power.mutex);

	if (new_power == rps->power.mode)
		return;

	/* Note the units here are not exactly 1us, but 1280ns. */
	switch (new_power) {
	case LOW_POWER:
		ei_up = 16000;
		ei_down = 32000;
		break;

	case BETWEEN:
		ei_up = 13000;
		ei_down = 32000;
		break;

	case HIGH_POWER:
		ei_up = 10000;
		ei_down = 32000;
		break;
	}
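
	/*
	 * The values above are treated as microseconds here (10000 == a 10ms
	 * up EI in HIGH_POWER); intel_gt_ns_to_pm_interval() converts them
	 * into the hw's 1280ns-based register units below.
	 */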

	/*
	 * When byt can survive without system hang with dynamic
	 * sw freq adjustments, this restriction can be lifted.
	 */
	if (IS_VALLEYVIEW(gt->i915))
		goto skip_hw_write;

	GT_TRACE(gt,
		 "changing power mode [%d], up %d%% @ %dus, down %d%% @ %dus\n",
		 new_power,
		 rps->power.up_threshold, ei_up,
		 rps->power.down_threshold, ei_down);

	set(uncore, GEN6_RP_UP_EI,
	    intel_gt_ns_to_pm_interval(gt, ei_up * 1000));
	set(uncore, GEN6_RP_UP_THRESHOLD,
	    intel_gt_ns_to_pm_interval(gt,
				       ei_up * rps->power.up_threshold * 10));

	set(uncore, GEN6_RP_DOWN_EI,
	    intel_gt_ns_to_pm_interval(gt, ei_down * 1000));
	set(uncore, GEN6_RP_DOWN_THRESHOLD,
	    intel_gt_ns_to_pm_interval(gt,
				       ei_down *
				       rps->power.down_threshold * 10));

	set(uncore, GEN6_RP_CONTROL,
	    (GRAPHICS_VER(gt->i915) > 9 ? 0 : GEN6_RP_MEDIA_TURBO) |
	    GEN6_RP_MEDIA_HW_NORMAL_MODE |
	    GEN6_RP_MEDIA_IS_GFX |
	    GEN6_RP_ENABLE |
	    GEN6_RP_UP_BUSY_AVG |
	    GEN6_RP_DOWN_IDLE_AVG);

skip_hw_write:
	rps->power.mode = new_power;
}

static void gen6_rps_set_thresholds(struct intel_rps *rps, u8 val)
{
	int new_power;

	new_power = rps->power.mode;
	switch (rps->power.mode) {
	case LOW_POWER:
		if (val > rps->efficient_freq + 1 &&
		    val > rps->cur_freq)
			new_power = BETWEEN;
		break;

	case BETWEEN:
		if (val <= rps->efficient_freq &&
		    val < rps->cur_freq)
			new_power = LOW_POWER;
		else if (val >= rps->rp0_freq &&
			 val > rps->cur_freq)
			new_power = HIGH_POWER;
		break;

	case HIGH_POWER:
		if (val < (rps->rp1_freq + rps->rp0_freq) >> 1 &&
		    val < rps->cur_freq)
			new_power = BETWEEN;
		break;
	}
	/* Max/min bins are special */
	if (val <= rps->min_freq_softlimit)
		new_power = LOW_POWER;
	if (val >= rps->max_freq_softlimit)
		new_power = HIGH_POWER;

	mutex_lock(&rps->power.mutex);
	if (rps->power.interactive)
		new_power = HIGH_POWER;
	rps_set_power(rps, new_power);
	mutex_unlock(&rps->power.mutex);
}
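
/*
 * Note the hysteresis above: switching to a higher power mode requires the
 * request to be both above the boundary frequency and above cur_freq (and
 * vice versa when dropping), so we do not flip modes while hovering around
 * a boundary.
 */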

void intel_rps_mark_interactive(struct intel_rps *rps, bool interactive)
{
	GT_TRACE(rps_to_gt(rps), "mark interactive: %s\n",
		 str_yes_no(interactive));

	mutex_lock(&rps->power.mutex);
	if (interactive) {
		if (!rps->power.interactive++ && intel_rps_is_active(rps))
			rps_set_power(rps, HIGH_POWER);
	} else {
		GEM_BUG_ON(!rps->power.interactive);
		rps->power.interactive--;
	}
	mutex_unlock(&rps->power.mutex);
}

static int gen6_rps_set(struct intel_rps *rps, u8 val)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 swreq;

	GEM_BUG_ON(rps_uses_slpc(rps));

	if (GRAPHICS_VER(i915) >= 9)
		swreq = GEN9_FREQUENCY(val);
	else if (IS_HASWELL(i915) || IS_BROADWELL(i915))
		swreq = HSW_FREQUENCY(val);
	else
		swreq = (GEN6_FREQUENCY(val) |
			 GEN6_OFFSET(0) |
			 GEN6_AGGRESSIVE_TURBO);
	set(uncore, GEN6_RPNSWREQ, swreq);

	GT_TRACE(rps_to_gt(rps), "set val:%x, freq:%d, swreq:%x\n",
		 val, intel_gpu_freq(rps, val), swreq);

	return 0;
}

static int vlv_rps_set(struct intel_rps *rps, u8 val)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	int err;

	vlv_iosf_sb_get(&i915->drm, BIT(VLV_IOSF_SB_PUNIT));
	err = vlv_iosf_sb_write(&i915->drm, VLV_IOSF_SB_PUNIT,
				PUNIT_REG_GPU_FREQ_REQ, val);
	vlv_iosf_sb_put(&i915->drm, BIT(VLV_IOSF_SB_PUNIT));

	GT_TRACE(rps_to_gt(rps), "set val:%x, freq:%d\n",
		 val, intel_gpu_freq(rps, val));

	return err;
}

static int rps_set(struct intel_rps *rps, u8 val, bool update)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	int err;

	if (val == rps->last_freq)
		return 0;

	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
		err = vlv_rps_set(rps, val);
	else if (GRAPHICS_VER(i915) >= 6)
		err = gen6_rps_set(rps, val);
	else
		err = gen5_rps_set(rps, val);
	if (err)
		return err;

	if (update && GRAPHICS_VER(i915) >= 6)
		gen6_rps_set_thresholds(rps, val);
	rps->last_freq = val;

	return 0;
}

void intel_rps_unpark(struct intel_rps *rps)
{
	if (!intel_rps_is_enabled(rps))
		return;

	GT_TRACE(rps_to_gt(rps), "unpark:%x\n", rps->cur_freq);

	/*
	 * Use the user's desired frequency as a guide, but for better
	 * performance, jump directly to RPe as our starting frequency.
	 */
	mutex_lock(&rps->lock);

	intel_rps_set_active(rps);
	intel_rps_set(rps,
		      clamp(rps->cur_freq,
			    rps->min_freq_softlimit,
			    rps->max_freq_softlimit));

	mutex_unlock(&rps->lock);

	rps->pm_iir = 0;
	if (intel_rps_has_interrupts(rps))
		rps_enable_interrupts(rps);
	if (intel_rps_uses_timer(rps))
		rps_start_timer(rps);

	if (GRAPHICS_VER(rps_to_i915(rps)) == 5)
		gen5_rps_update(rps);
}
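
/*
 * Note: cur_freq may have decayed below min_freq_softlimit while parked
 * (see intel_rps_park() below), so the clamp above re-applies the user's
 * soft limits on the way back up.
 */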
941 */ 942 adj = rps->last_adj; 943 if (adj < 0) 944 adj *= 2; 945 else /* CHV needs even encode values */ 946 adj = -2; 947 rps->last_adj = adj; 948 rps->cur_freq = max_t(int, rps->cur_freq + adj, rps->min_freq); 949 if (rps->cur_freq < rps->efficient_freq) { 950 rps->cur_freq = rps->efficient_freq; 951 rps->last_adj = 0; 952 } 953 954 GT_TRACE(rps_to_gt(rps), "park:%x\n", rps->cur_freq); 955 } 956 957 u32 intel_rps_get_boost_frequency(struct intel_rps *rps) 958 { 959 struct intel_guc_slpc *slpc; 960 961 if (rps_uses_slpc(rps)) { 962 slpc = rps_to_slpc(rps); 963 964 return slpc->boost_freq; 965 } else { 966 return intel_gpu_freq(rps, rps->boost_freq); 967 } 968 } 969 970 static int rps_set_boost_freq(struct intel_rps *rps, u32 val) 971 { 972 bool boost = false; 973 974 /* Validate against (static) hardware limits */ 975 val = intel_freq_opcode(rps, val); 976 if (val < rps->min_freq || val > rps->max_freq) 977 return -EINVAL; 978 979 mutex_lock(&rps->lock); 980 if (val != rps->boost_freq) { 981 rps->boost_freq = val; 982 boost = atomic_read(&rps->num_waiters); 983 } 984 mutex_unlock(&rps->lock); 985 if (boost) 986 queue_work(rps_to_gt(rps)->i915->unordered_wq, &rps->work); 987 988 return 0; 989 } 990 991 int intel_rps_set_boost_frequency(struct intel_rps *rps, u32 freq) 992 { 993 struct intel_guc_slpc *slpc; 994 995 if (rps_uses_slpc(rps)) { 996 slpc = rps_to_slpc(rps); 997 998 return intel_guc_slpc_set_boost_freq(slpc, freq); 999 } else { 1000 return rps_set_boost_freq(rps, freq); 1001 } 1002 } 1003 1004 void intel_rps_dec_waiters(struct intel_rps *rps) 1005 { 1006 struct intel_guc_slpc *slpc; 1007 1008 if (rps_uses_slpc(rps)) { 1009 slpc = rps_to_slpc(rps); 1010 1011 /* Don't decrement num_waiters for req where increment was skipped */ 1012 if (slpc->power_profile == SLPC_POWER_PROFILES_POWER_SAVING) 1013 return; 1014 1015 intel_guc_slpc_dec_waiters(slpc); 1016 } else { 1017 atomic_dec(&rps->num_waiters); 1018 } 1019 } 1020 1021 void intel_rps_boost(struct i915_request *rq) 1022 { 1023 struct intel_guc_slpc *slpc; 1024 1025 if (i915_request_signaled(rq) || i915_request_has_waitboost(rq)) 1026 return; 1027 1028 /* Waitboost is not needed for contexts marked with a Freq hint */ 1029 if (test_bit(CONTEXT_LOW_LATENCY, &rq->context->flags)) 1030 return; 1031 1032 /* Serializes with i915_request_retire() */ 1033 if (!test_and_set_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags)) { 1034 struct intel_rps *rps = &READ_ONCE(rq->engine)->gt->rps; 1035 1036 if (rps_uses_slpc(rps)) { 1037 slpc = rps_to_slpc(rps); 1038 1039 /* Waitboost should not be done with power saving profile */ 1040 if (slpc->power_profile == SLPC_POWER_PROFILES_POWER_SAVING) 1041 return; 1042 1043 /* Return if old value is non zero */ 1044 if (!atomic_fetch_inc(&slpc->num_waiters)) { 1045 /* 1046 * Skip queuing boost work if frequency is already boosted, 1047 * but still increment num_waiters. 

u32 intel_rps_get_boost_frequency(struct intel_rps *rps)
{
	struct intel_guc_slpc *slpc;

	if (rps_uses_slpc(rps)) {
		slpc = rps_to_slpc(rps);

		return slpc->boost_freq;
	} else {
		return intel_gpu_freq(rps, rps->boost_freq);
	}
}

static int rps_set_boost_freq(struct intel_rps *rps, u32 val)
{
	bool boost = false;

	/* Validate against (static) hardware limits */
	val = intel_freq_opcode(rps, val);
	if (val < rps->min_freq || val > rps->max_freq)
		return -EINVAL;

	mutex_lock(&rps->lock);
	if (val != rps->boost_freq) {
		rps->boost_freq = val;
		boost = atomic_read(&rps->num_waiters);
	}
	mutex_unlock(&rps->lock);
	if (boost)
		queue_work(rps_to_gt(rps)->i915->unordered_wq, &rps->work);

	return 0;
}

int intel_rps_set_boost_frequency(struct intel_rps *rps, u32 freq)
{
	struct intel_guc_slpc *slpc;

	if (rps_uses_slpc(rps)) {
		slpc = rps_to_slpc(rps);

		return intel_guc_slpc_set_boost_freq(slpc, freq);
	} else {
		return rps_set_boost_freq(rps, freq);
	}
}

void intel_rps_dec_waiters(struct intel_rps *rps)
{
	struct intel_guc_slpc *slpc;

	if (rps_uses_slpc(rps)) {
		slpc = rps_to_slpc(rps);

		/* Don't decrement num_waiters for req where increment was skipped */
		if (slpc->power_profile == SLPC_POWER_PROFILES_POWER_SAVING)
			return;

		intel_guc_slpc_dec_waiters(slpc);
	} else {
		atomic_dec(&rps->num_waiters);
	}
}

void intel_rps_boost(struct i915_request *rq)
{
	struct intel_guc_slpc *slpc;

	if (i915_request_signaled(rq) || i915_request_has_waitboost(rq))
		return;

	/* Waitboost is not needed for contexts marked with a Freq hint */
	if (test_bit(CONTEXT_LOW_LATENCY, &rq->context->flags))
		return;

	/* Serializes with i915_request_retire() */
	if (!test_and_set_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags)) {
		struct intel_rps *rps = &READ_ONCE(rq->engine)->gt->rps;

		if (rps_uses_slpc(rps)) {
			slpc = rps_to_slpc(rps);

			/* Waitboost should not be done with power saving profile */
			if (slpc->power_profile == SLPC_POWER_PROFILES_POWER_SAVING)
				return;

			/* Boost only for the first waiter (old count was zero) */
			if (!atomic_fetch_inc(&slpc->num_waiters)) {
				/*
				 * Skip queuing boost work if frequency is already boosted,
				 * but still increment num_waiters.
				 */
				if (slpc->min_freq_softlimit >= slpc->boost_freq)
					return;

				GT_TRACE(rps_to_gt(rps), "boost fence:%llx:%llx\n",
					 rq->fence.context, rq->fence.seqno);
				queue_work(rps_to_gt(rps)->i915->unordered_wq,
					   &slpc->boost_work);
			}

			return;
		}

		if (atomic_fetch_inc(&rps->num_waiters))
			return;

		if (!intel_rps_is_active(rps))
			return;

		GT_TRACE(rps_to_gt(rps), "boost fence:%llx:%llx\n",
			 rq->fence.context, rq->fence.seqno);

		if (READ_ONCE(rps->cur_freq) < rps->boost_freq)
			queue_work(rps_to_gt(rps)->i915->unordered_wq, &rps->work);

		WRITE_ONCE(rps->boosts, rps->boosts + 1); /* debug only */
	}
}

int intel_rps_set(struct intel_rps *rps, u8 val)
{
	int err;

	lockdep_assert_held(&rps->lock);
	GEM_BUG_ON(val > rps->max_freq);
	GEM_BUG_ON(val < rps->min_freq);

	if (intel_rps_is_active(rps)) {
		err = rps_set(rps, val, true);
		if (err)
			return err;

		/*
		 * Make sure we continue to get interrupts
		 * until we hit the minimum or maximum frequencies.
		 */
		if (intel_rps_has_interrupts(rps)) {
			struct intel_uncore *uncore = rps_to_uncore(rps);

			set(uncore,
			    GEN6_RP_INTERRUPT_LIMITS, rps_limits(rps, val));

			set(uncore, GEN6_PMINTRMSK, rps_pm_mask(rps, val));
		}
	}

	rps->cur_freq = val;
	return 0;
}

static u32 intel_rps_read_state_cap(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	struct intel_uncore *uncore = rps_to_uncore(rps);

	if (IS_GEN9_LP(i915))
		return intel_uncore_read(uncore, BXT_RP_STATE_CAP);
	else
		return intel_uncore_read(uncore, GEN6_RP_STATE_CAP);
}

static void
mtl_get_freq_caps(struct intel_rps *rps, struct intel_rps_freq_caps *caps)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);
	u32 rp_state_cap = rps_to_gt(rps)->type == GT_MEDIA ?
		intel_uncore_read(uncore, MTL_MEDIAP_STATE_CAP) :
		intel_uncore_read(uncore, MTL_RP_STATE_CAP);
	u32 rpe = rps_to_gt(rps)->type == GT_MEDIA ?
		intel_uncore_read(uncore, MTL_MPE_FREQUENCY) :
		intel_uncore_read(uncore, MTL_GT_RPE_FREQUENCY);

	/* MTL values are in units of 16.67 MHz */
	caps->rp0_freq = REG_FIELD_GET(MTL_RP0_CAP_MASK, rp_state_cap);
	caps->min_freq = REG_FIELD_GET(MTL_RPN_CAP_MASK, rp_state_cap);
	caps->rp1_freq = REG_FIELD_GET(MTL_RPE_MASK, rpe);
}
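
/*
 * Illustrative conversion: an RP0 field of 43 in these 16.67 MHz units is
 * 43 * 16.67 ~= 717 MHz (see intel_gpu_freq()).
 */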

static void
__gen6_rps_get_freq_caps(struct intel_rps *rps, struct intel_rps_freq_caps *caps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 rp_state_cap;

	rp_state_cap = intel_rps_read_state_cap(rps);

	/* static values from HW: RP0 > RP1 > RPn (min_freq) */
	if (IS_GEN9_LP(i915)) {
		caps->rp0_freq = (rp_state_cap >> 16) & 0xff;
		caps->rp1_freq = (rp_state_cap >> 8) & 0xff;
		caps->min_freq = (rp_state_cap >> 0) & 0xff;
	} else {
		caps->rp0_freq = (rp_state_cap >> 0) & 0xff;
		if (GRAPHICS_VER(i915) >= 10)
			caps->rp1_freq = REG_FIELD_GET(RPE_MASK,
						       intel_uncore_read(to_gt(i915)->uncore,
									 GEN10_FREQ_INFO_REC));
		else
			caps->rp1_freq = (rp_state_cap >> 8) & 0xff;
		caps->min_freq = (rp_state_cap >> 16) & 0xff;
	}

	if (IS_GEN9_BC(i915) || GRAPHICS_VER(i915) >= 11) {
		/*
		 * In this case rp_state_cap register reports frequencies in
		 * units of 50 MHz. Convert these to the actual "hw unit",
		 * i.e. units of 16.67 MHz
		 */
		caps->rp0_freq *= GEN9_FREQ_SCALER;
		caps->rp1_freq *= GEN9_FREQ_SCALER;
		caps->min_freq *= GEN9_FREQ_SCALER;
	}
}

/**
 * gen6_rps_get_freq_caps - Get freq caps exposed by HW
 * @rps: the intel_rps structure
 * @caps: returned freq caps
 *
 * Returned "caps" frequencies should be converted to MHz using
 * intel_gpu_freq()
 */
void gen6_rps_get_freq_caps(struct intel_rps *rps, struct intel_rps_freq_caps *caps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);

	if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70))
		return mtl_get_freq_caps(rps, caps);
	else
		return __gen6_rps_get_freq_caps(rps, caps);
}

static void gen6_rps_init(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	struct intel_rps_freq_caps caps;

	gen6_rps_get_freq_caps(rps, &caps);
	rps->rp0_freq = caps.rp0_freq;
	rps->rp1_freq = caps.rp1_freq;
	rps->min_freq = caps.min_freq;

	/* hw_max = RP0 until we check for overclocking */
	rps->max_freq = rps->rp0_freq;

	rps->efficient_freq = rps->rp1_freq;
	if (IS_HASWELL(i915) || IS_BROADWELL(i915) ||
	    IS_GEN9_BC(i915) || GRAPHICS_VER(i915) >= 11) {
		u32 ddcc_status = 0;
		u32 mult = 1;

		if (IS_GEN9_BC(i915) || GRAPHICS_VER(i915) >= 11)
			mult = GEN9_FREQ_SCALER;
		if (snb_pcode_read(rps_to_gt(rps)->uncore,
				   HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
				   &ddcc_status, NULL) == 0)
			rps->efficient_freq =
				clamp_t(u32,
					((ddcc_status >> 8) & 0xff) * mult,
					rps->min_freq,
					rps->max_freq);
	}
}
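
/*
 * Note: when pcode exposes the dynamic-duty-cycle RPe above, it replaces
 * RP1 as our efficient frequency; byte 1 of ddcc_status is in the same
 * units as the other caps once scaled by mult.
 */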

static bool rps_reset(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);

	/* force a reset */
	rps->power.mode = -1;
	rps->last_freq = -1;

	if (rps_set(rps, rps->min_freq, true)) {
		drm_err(&i915->drm, "Failed to reset RPS to initial values\n");
		return false;
	}

	rps->cur_freq = rps->min_freq;
	return true;
}

/* See the Gen9_GT_PM_Programming_Guide doc for the below */
static bool gen9_rps_enable(struct intel_rps *rps)
{
	struct intel_gt *gt = rps_to_gt(rps);
	struct intel_uncore *uncore = gt->uncore;

	/* Program defaults and thresholds for RPS */
	if (GRAPHICS_VER(gt->i915) == 9)
		intel_uncore_write_fw(uncore, GEN6_RC_VIDEO_FREQ,
				      GEN9_FREQUENCY(rps->rp1_freq));

	intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 0xa);

	rps->pm_events = GEN6_PM_RP_UP_THRESHOLD | GEN6_PM_RP_DOWN_THRESHOLD;

	return rps_reset(rps);
}

static bool gen8_rps_enable(struct intel_rps *rps)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);

	intel_uncore_write_fw(uncore, GEN6_RC_VIDEO_FREQ,
			      HSW_FREQUENCY(rps->rp1_freq));

	intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10);

	rps->pm_events = GEN6_PM_RP_UP_THRESHOLD | GEN6_PM_RP_DOWN_THRESHOLD;

	return rps_reset(rps);
}

static bool gen6_rps_enable(struct intel_rps *rps)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);

	/* Power down if completely idle for over 50ms */
	intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, 50000);
	intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10);

	rps->pm_events = (GEN6_PM_RP_UP_THRESHOLD |
			  GEN6_PM_RP_DOWN_THRESHOLD |
			  GEN6_PM_RP_DOWN_TIMEOUT);

	return rps_reset(rps);
}

static int chv_rps_max_freq(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	struct intel_gt *gt = rps_to_gt(rps);
	u32 val;

	val = vlv_iosf_sb_read(&i915->drm, VLV_IOSF_SB_PUNIT, FB_GFX_FMAX_AT_VMAX_FUSE);

	switch (gt->info.sseu.eu_total) {
	case 8:
		/* (2 * 4) config */
		val >>= FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT;
		break;
	case 12:
		/* (2 * 6) config */
		val >>= FB_GFX_FMAX_AT_VMAX_2SS6EU_FUSE_SHIFT;
		break;
	case 16:
		/* (2 * 8) config */
	default:
		/* Setting (2 * 8) Min RP0 for any other combination */
		val >>= FB_GFX_FMAX_AT_VMAX_2SS8EU_FUSE_SHIFT;
		break;
	}

	return val & FB_GFX_FREQ_FUSE_MASK;
}

static int chv_rps_rpe_freq(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val;

	val = vlv_iosf_sb_read(&i915->drm, VLV_IOSF_SB_PUNIT, PUNIT_GPU_DUTYCYCLE_REG);
	val >>= PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT;

	return val & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK;
}

static int chv_rps_guar_freq(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val;

	val = vlv_iosf_sb_read(&i915->drm, VLV_IOSF_SB_PUNIT, FB_GFX_FMAX_AT_VMAX_FUSE);

	return val & FB_GFX_FREQ_FUSE_MASK;
}

static u32 chv_rps_min_freq(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val;

	val = vlv_iosf_sb_read(&i915->drm, VLV_IOSF_SB_PUNIT, FB_GFX_FMIN_AT_VMIN_FUSE);
	val >>= FB_GFX_FMIN_AT_VMIN_FUSE_SHIFT;

	return val & FB_GFX_FREQ_FUSE_MASK;
}

static bool chv_rps_enable(struct intel_rps *rps)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val;

	/* 1: Program defaults and thresholds for RPS*/
	intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, 1000000);
	intel_uncore_write_fw(uncore, GEN6_RP_UP_THRESHOLD, 59400);
	intel_uncore_write_fw(uncore, GEN6_RP_DOWN_THRESHOLD, 245000);
	intel_uncore_write_fw(uncore, GEN6_RP_UP_EI, 66000);
	intel_uncore_write_fw(uncore, GEN6_RP_DOWN_EI, 350000);
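
	/*
	 * i.e. upclock when more than ~90% busy over the up interval
	 * (59400 / 66000) and downclock when less than ~70% busy over the
	 * down interval (245000 / 350000).
	 */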
%s\n", 1388 str_yes_no(val & GPLLENABLE)); 1389 drm_dbg(&i915->drm, "GPU status: 0x%08x\n", val); 1390 1391 return rps_reset(rps); 1392 } 1393 1394 static int vlv_rps_guar_freq(struct intel_rps *rps) 1395 { 1396 struct drm_i915_private *i915 = rps_to_i915(rps); 1397 u32 val, rp1; 1398 1399 val = vlv_iosf_sb_read(&i915->drm, VLV_IOSF_SB_NC, IOSF_NC_FB_GFX_FREQ_FUSE); 1400 1401 rp1 = val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK; 1402 rp1 >>= FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT; 1403 1404 return rp1; 1405 } 1406 1407 static int vlv_rps_max_freq(struct intel_rps *rps) 1408 { 1409 struct drm_i915_private *i915 = rps_to_i915(rps); 1410 u32 val, rp0; 1411 1412 val = vlv_iosf_sb_read(&i915->drm, VLV_IOSF_SB_NC, IOSF_NC_FB_GFX_FREQ_FUSE); 1413 1414 rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT; 1415 /* Clamp to max */ 1416 rp0 = min_t(u32, rp0, 0xea); 1417 1418 return rp0; 1419 } 1420 1421 static int vlv_rps_rpe_freq(struct intel_rps *rps) 1422 { 1423 struct drm_i915_private *i915 = rps_to_i915(rps); 1424 u32 val, rpe; 1425 1426 val = vlv_iosf_sb_read(&i915->drm, VLV_IOSF_SB_NC, IOSF_NC_FB_GFX_FMAX_FUSE_LO); 1427 rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT; 1428 val = vlv_iosf_sb_read(&i915->drm, VLV_IOSF_SB_NC, IOSF_NC_FB_GFX_FMAX_FUSE_HI); 1429 rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5; 1430 1431 return rpe; 1432 } 1433 1434 static int vlv_rps_min_freq(struct intel_rps *rps) 1435 { 1436 struct drm_i915_private *i915 = rps_to_i915(rps); 1437 u32 val; 1438 1439 val = vlv_iosf_sb_read(&i915->drm, VLV_IOSF_SB_PUNIT, PUNIT_REG_GPU_LFM) & 0xff; 1440 /* 1441 * According to the BYT Punit GPU turbo HAS 1.1.6.3 the minimum value 1442 * for the minimum frequency in GPLL mode is 0xc1. Contrary to this on 1443 * a BYT-M B0 the above register contains 0xbf. Moreover when setting 1444 * a frequency Punit will not allow values below 0xc0. Clamp it 0xc0 1445 * to make sure it matches what Punit accepts. 1446 */ 1447 return max_t(u32, val, 0xc0); 1448 } 1449 1450 static bool vlv_rps_enable(struct intel_rps *rps) 1451 { 1452 struct intel_uncore *uncore = rps_to_uncore(rps); 1453 struct drm_i915_private *i915 = rps_to_i915(rps); 1454 u32 val; 1455 1456 intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, 1000000); 1457 intel_uncore_write_fw(uncore, GEN6_RP_UP_THRESHOLD, 59400); 1458 intel_uncore_write_fw(uncore, GEN6_RP_DOWN_THRESHOLD, 245000); 1459 intel_uncore_write_fw(uncore, GEN6_RP_UP_EI, 66000); 1460 intel_uncore_write_fw(uncore, GEN6_RP_DOWN_EI, 350000); 1461 1462 intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10); 1463 1464 intel_uncore_write_fw(uncore, GEN6_RP_CONTROL, 1465 GEN6_RP_MEDIA_TURBO | 1466 GEN6_RP_MEDIA_HW_NORMAL_MODE | 1467 GEN6_RP_MEDIA_IS_GFX | 1468 GEN6_RP_ENABLE | 1469 GEN6_RP_UP_BUSY_AVG | 1470 GEN6_RP_DOWN_IDLE_CONT); 1471 1472 /* WaGsvRC0ResidencyMethod:vlv */ 1473 rps->pm_events = GEN6_PM_RP_UP_EI_EXPIRED; 1474 1475 vlv_iosf_sb_get(&i915->drm, BIT(VLV_IOSF_SB_PUNIT)); 1476 1477 /* Setting Fixed Bias */ 1478 val = VLV_OVERRIDE_EN | VLV_SOC_TDP_EN | VLV_BIAS_CPU_125_SOC_875; 1479 vlv_iosf_sb_write(&i915->drm, VLV_IOSF_SB_PUNIT, VLV_TURBO_SOC_OVERRIDE, val); 1480 1481 val = vlv_iosf_sb_read(&i915->drm, VLV_IOSF_SB_PUNIT, PUNIT_REG_GPU_FREQ_STS); 1482 1483 vlv_iosf_sb_put(&i915->drm, BIT(VLV_IOSF_SB_PUNIT)); 1484 1485 /* RPS code assumes GPLL is used */ 1486 drm_WARN_ONCE(&i915->drm, (val & GPLLENABLE) == 0, 1487 "GPLL not enabled\n"); 1488 1489 drm_dbg(&i915->drm, "GPLL enabled? 
%s\n", 1490 str_yes_no(val & GPLLENABLE)); 1491 drm_dbg(&i915->drm, "GPU status: 0x%08x\n", val); 1492 1493 return rps_reset(rps); 1494 } 1495 1496 static unsigned long __ips_gfx_val(struct intel_ips *ips) 1497 { 1498 struct intel_rps *rps = container_of(ips, typeof(*rps), ips); 1499 struct intel_uncore *uncore = rps_to_uncore(rps); 1500 unsigned int t, state1, state2; 1501 u32 pxvid, ext_v; 1502 u64 corr, corr2; 1503 1504 lockdep_assert_held(&mchdev_lock); 1505 1506 pxvid = intel_uncore_read(uncore, PXVFREQ(rps->cur_freq)); 1507 pxvid = (pxvid >> 24) & 0x7f; 1508 ext_v = pvid_to_extvid(rps_to_i915(rps), pxvid); 1509 1510 state1 = ext_v; 1511 1512 /* Revel in the empirically derived constants */ 1513 1514 /* Correction factor in 1/100000 units */ 1515 t = ips_mch_val(uncore); 1516 if (t > 80) 1517 corr = t * 2349 + 135940; 1518 else if (t >= 50) 1519 corr = t * 964 + 29317; 1520 else /* < 50 */ 1521 corr = t * 301 + 1004; 1522 1523 corr = div_u64(corr * 150142 * state1, 10000) - 78642; 1524 corr2 = div_u64(corr, 100000) * ips->corr; 1525 1526 state2 = div_u64(corr2 * state1, 10000); 1527 state2 /= 100; /* convert to mW */ 1528 1529 __gen5_ips_update(ips); 1530 1531 return ips->gfx_power + state2; 1532 } 1533 1534 static bool has_busy_stats(struct intel_rps *rps) 1535 { 1536 struct intel_engine_cs *engine; 1537 enum intel_engine_id id; 1538 1539 for_each_engine(engine, rps_to_gt(rps), id) { 1540 if (!intel_engine_supports_stats(engine)) 1541 return false; 1542 } 1543 1544 return true; 1545 } 1546 1547 void intel_rps_enable(struct intel_rps *rps) 1548 { 1549 struct drm_i915_private *i915 = rps_to_i915(rps); 1550 struct intel_uncore *uncore = rps_to_uncore(rps); 1551 bool enabled = false; 1552 1553 if (!HAS_RPS(i915)) 1554 return; 1555 1556 if (rps_uses_slpc(rps)) 1557 return; 1558 1559 intel_gt_check_clock_frequency(rps_to_gt(rps)); 1560 1561 intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL); 1562 if (rps->max_freq <= rps->min_freq) 1563 /* leave disabled, no room for dynamic reclocking */; 1564 else if (IS_CHERRYVIEW(i915)) 1565 enabled = chv_rps_enable(rps); 1566 else if (IS_VALLEYVIEW(i915)) 1567 enabled = vlv_rps_enable(rps); 1568 else if (GRAPHICS_VER(i915) >= 9) 1569 enabled = gen9_rps_enable(rps); 1570 else if (GRAPHICS_VER(i915) >= 8) 1571 enabled = gen8_rps_enable(rps); 1572 else if (GRAPHICS_VER(i915) >= 6) 1573 enabled = gen6_rps_enable(rps); 1574 else if (IS_IRONLAKE_M(i915)) 1575 enabled = gen5_rps_enable(rps); 1576 else 1577 MISSING_CASE(GRAPHICS_VER(i915)); 1578 intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL); 1579 if (!enabled) 1580 return; 1581 1582 GT_TRACE(rps_to_gt(rps), 1583 "min:%x, max:%x, freq:[%d, %d], thresholds:[%u, %u]\n", 1584 rps->min_freq, rps->max_freq, 1585 intel_gpu_freq(rps, rps->min_freq), 1586 intel_gpu_freq(rps, rps->max_freq), 1587 rps->power.up_threshold, 1588 rps->power.down_threshold); 1589 1590 GEM_BUG_ON(rps->max_freq < rps->min_freq); 1591 GEM_BUG_ON(rps->idle_freq > rps->max_freq); 1592 1593 GEM_BUG_ON(rps->efficient_freq < rps->min_freq); 1594 GEM_BUG_ON(rps->efficient_freq > rps->max_freq); 1595 1596 if (has_busy_stats(rps)) 1597 intel_rps_set_timer(rps); 1598 else if (GRAPHICS_VER(i915) >= 6 && GRAPHICS_VER(i915) <= 11) 1599 intel_rps_set_interrupts(rps); 1600 else 1601 /* Ironlake currently uses intel_ips.ko */ {} 1602 1603 intel_rps_set_enabled(rps); 1604 } 1605 1606 static void gen6_rps_disable(struct intel_rps *rps) 1607 { 1608 set(rps_to_uncore(rps), GEN6_RP_CONTROL, 0); 1609 } 1610 1611 void intel_rps_disable(struct intel_rps *rps) 

void intel_rps_enable(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	struct intel_uncore *uncore = rps_to_uncore(rps);
	bool enabled = false;

	if (!HAS_RPS(i915))
		return;

	if (rps_uses_slpc(rps))
		return;

	intel_gt_check_clock_frequency(rps_to_gt(rps));

	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
	if (rps->max_freq <= rps->min_freq)
		/* leave disabled, no room for dynamic reclocking */;
	else if (IS_CHERRYVIEW(i915))
		enabled = chv_rps_enable(rps);
	else if (IS_VALLEYVIEW(i915))
		enabled = vlv_rps_enable(rps);
	else if (GRAPHICS_VER(i915) >= 9)
		enabled = gen9_rps_enable(rps);
	else if (GRAPHICS_VER(i915) >= 8)
		enabled = gen8_rps_enable(rps);
	else if (GRAPHICS_VER(i915) >= 6)
		enabled = gen6_rps_enable(rps);
	else if (IS_IRONLAKE_M(i915))
		enabled = gen5_rps_enable(rps);
	else
		MISSING_CASE(GRAPHICS_VER(i915));
	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
	if (!enabled)
		return;

	GT_TRACE(rps_to_gt(rps),
		 "min:%x, max:%x, freq:[%d, %d], thresholds:[%u, %u]\n",
		 rps->min_freq, rps->max_freq,
		 intel_gpu_freq(rps, rps->min_freq),
		 intel_gpu_freq(rps, rps->max_freq),
		 rps->power.up_threshold,
		 rps->power.down_threshold);

	GEM_BUG_ON(rps->max_freq < rps->min_freq);
	GEM_BUG_ON(rps->idle_freq > rps->max_freq);

	GEM_BUG_ON(rps->efficient_freq < rps->min_freq);
	GEM_BUG_ON(rps->efficient_freq > rps->max_freq);

	if (has_busy_stats(rps))
		intel_rps_set_timer(rps);
	else if (GRAPHICS_VER(i915) >= 6 && GRAPHICS_VER(i915) <= 11)
		intel_rps_set_interrupts(rps);
	else
		/* Ironlake currently uses intel_ips.ko */ {}

	intel_rps_set_enabled(rps);
}

static void gen6_rps_disable(struct intel_rps *rps)
{
	set(rps_to_uncore(rps), GEN6_RP_CONTROL, 0);
}

void intel_rps_disable(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);

	if (!intel_rps_is_enabled(rps))
		return;

	intel_rps_clear_enabled(rps);
	intel_rps_clear_interrupts(rps);
	intel_rps_clear_timer(rps);

	if (GRAPHICS_VER(i915) >= 6)
		gen6_rps_disable(rps);
	else if (IS_IRONLAKE_M(i915))
		gen5_rps_disable(rps);
}

static int byt_gpu_freq(struct intel_rps *rps, int val)
{
	/*
	 * N = val - 0xb7
	 * Slow = Fast = GPLL ref * N
	 */
	return DIV_ROUND_CLOSEST(rps->gpll_ref_freq * (val - 0xb7), 1000);
}

static int byt_freq_opcode(struct intel_rps *rps, int val)
{
	return DIV_ROUND_CLOSEST(1000 * val, rps->gpll_ref_freq) + 0xb7;
}

static int chv_gpu_freq(struct intel_rps *rps, int val)
{
	/*
	 * N = val / 2
	 * CU (slow) = CU2x (fast) / 2 = GPLL ref * N / 2
	 */
	return DIV_ROUND_CLOSEST(rps->gpll_ref_freq * val, 2 * 2 * 1000);
}

static int chv_freq_opcode(struct intel_rps *rps, int val)
{
	/* CHV needs even values */
	return DIV_ROUND_CLOSEST(2 * 1000 * val, rps->gpll_ref_freq) * 2;
}

int intel_gpu_freq(struct intel_rps *rps, int val)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);

	if (GRAPHICS_VER(i915) >= 9)
		return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER,
					 GEN9_FREQ_SCALER);
	else if (IS_CHERRYVIEW(i915))
		return chv_gpu_freq(rps, val);
	else if (IS_VALLEYVIEW(i915))
		return byt_gpu_freq(rps, val);
	else if (GRAPHICS_VER(i915) >= 6)
		return val * GT_FREQUENCY_MULTIPLIER;
	else
		return val;
}

int intel_freq_opcode(struct intel_rps *rps, int val)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);

	if (GRAPHICS_VER(i915) >= 9)
		return DIV_ROUND_CLOSEST(val * GEN9_FREQ_SCALER,
					 GT_FREQUENCY_MULTIPLIER);
	else if (IS_CHERRYVIEW(i915))
		return chv_freq_opcode(rps, val);
	else if (IS_VALLEYVIEW(i915))
		return byt_freq_opcode(rps, val);
	else if (GRAPHICS_VER(i915) >= 6)
		return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER);
	else
		return val;
}
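
/*
 * Worked example for the gen9+ branches above (with GT_FREQUENCY_MULTIPLIER
 * of 50 and GEN9_FREQ_SCALER of 3): opcode 24 -> 24 * 50 / 3 = 400 MHz, and
 * intel_freq_opcode() inverts it, 400 * 3 / 50 = 24.
 */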

static void vlv_init_gpll_ref_freq(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);

	rps->gpll_ref_freq = vlv_clock_get_gpll(&i915->drm);

	drm_dbg(&i915->drm, "GPLL reference freq: %d kHz\n",
		rps->gpll_ref_freq);
}

static void vlv_rps_init(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);

	vlv_init_gpll_ref_freq(rps);

	vlv_iosf_sb_get(&i915->drm,
			BIT(VLV_IOSF_SB_PUNIT) |
			BIT(VLV_IOSF_SB_NC) |
			BIT(VLV_IOSF_SB_CCK));

	rps->max_freq = vlv_rps_max_freq(rps);
	rps->rp0_freq = rps->max_freq;
	drm_dbg(&i915->drm, "max GPU freq: %d MHz (%u)\n",
		intel_gpu_freq(rps, rps->max_freq), rps->max_freq);

	rps->efficient_freq = vlv_rps_rpe_freq(rps);
	drm_dbg(&i915->drm, "RPe GPU freq: %d MHz (%u)\n",
		intel_gpu_freq(rps, rps->efficient_freq), rps->efficient_freq);

	rps->rp1_freq = vlv_rps_guar_freq(rps);
	drm_dbg(&i915->drm, "RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
		intel_gpu_freq(rps, rps->rp1_freq), rps->rp1_freq);

	rps->min_freq = vlv_rps_min_freq(rps);
	drm_dbg(&i915->drm, "min GPU freq: %d MHz (%u)\n",
		intel_gpu_freq(rps, rps->min_freq), rps->min_freq);

	vlv_iosf_sb_put(&i915->drm,
			BIT(VLV_IOSF_SB_PUNIT) |
			BIT(VLV_IOSF_SB_NC) |
			BIT(VLV_IOSF_SB_CCK));
}

static void chv_rps_init(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);

	vlv_init_gpll_ref_freq(rps);

	vlv_iosf_sb_get(&i915->drm,
			BIT(VLV_IOSF_SB_PUNIT) |
			BIT(VLV_IOSF_SB_NC) |
			BIT(VLV_IOSF_SB_CCK));

	rps->max_freq = chv_rps_max_freq(rps);
	rps->rp0_freq = rps->max_freq;
	drm_dbg(&i915->drm, "max GPU freq: %d MHz (%u)\n",
		intel_gpu_freq(rps, rps->max_freq), rps->max_freq);

	rps->efficient_freq = chv_rps_rpe_freq(rps);
	drm_dbg(&i915->drm, "RPe GPU freq: %d MHz (%u)\n",
		intel_gpu_freq(rps, rps->efficient_freq), rps->efficient_freq);

	rps->rp1_freq = chv_rps_guar_freq(rps);
	drm_dbg(&i915->drm, "RP1(Guar) GPU freq: %d MHz (%u)\n",
		intel_gpu_freq(rps, rps->rp1_freq), rps->rp1_freq);

	rps->min_freq = chv_rps_min_freq(rps);
	drm_dbg(&i915->drm, "min GPU freq: %d MHz (%u)\n",
		intel_gpu_freq(rps, rps->min_freq), rps->min_freq);

	vlv_iosf_sb_put(&i915->drm,
			BIT(VLV_IOSF_SB_PUNIT) |
			BIT(VLV_IOSF_SB_NC) |
			BIT(VLV_IOSF_SB_CCK));

	drm_WARN_ONCE(&i915->drm, (rps->max_freq | rps->efficient_freq |
				   rps->rp1_freq | rps->min_freq) & 1,
		      "Odd GPU freq values\n");
}

static void vlv_c0_read(struct intel_uncore *uncore, struct intel_rps_ei *ei)
{
	ei->ktime = ktime_get_raw();
	ei->render_c0 = intel_uncore_read(uncore, VLV_RENDER_C0_COUNT);
	ei->media_c0 = intel_uncore_read(uncore, VLV_MEDIA_C0_COUNT);
}
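
/*
 * The snapshot above pairs the render/media C0 residency counters with a
 * raw timestamp; vlv_wa_c0_ei() below differentiates two snapshots to
 * estimate busyness over the elapsed interval.
 */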

static u32 vlv_wa_c0_ei(struct intel_rps *rps, u32 pm_iir)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	struct intel_uncore *uncore = rps_to_uncore(rps);
	const struct intel_rps_ei *prev = &rps->ei;
	struct intel_rps_ei now;
	u32 events = 0;

	if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0)
		return 0;

	vlv_c0_read(uncore, &now);

	if (prev->ktime) {
		u64 time, c0;
		u32 render, media;

		time = ktime_us_delta(now.ktime, prev->ktime);

		time *= vlv_clock_get_czclk(&i915->drm);

		/*
		 * Workload can be split between render + media,
		 * e.g. SwapBuffers being blitted in X after being rendered in
		 * mesa. To account for this we need to combine both engines
		 * into our activity counter.
		 */
		render = now.render_c0 - prev->render_c0;
		media = now.media_c0 - prev->media_c0;
		c0 = max(render, media);
		c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */

		if (c0 > time * rps->power.up_threshold)
			events = GEN6_PM_RP_UP_THRESHOLD;
		else if (c0 < time * rps->power.down_threshold)
			events = GEN6_PM_RP_DOWN_THRESHOLD;
	}

	rps->ei = now;
	return events;
}

static void rps_work(struct work_struct *work)
{
	struct intel_rps *rps = container_of(work, typeof(*rps), work);
	struct intel_gt *gt = rps_to_gt(rps);
	struct drm_i915_private *i915 = rps_to_i915(rps);
	bool client_boost = false;
	int new_freq, adj, min, max;
	u32 pm_iir = 0;

	spin_lock_irq(gt->irq_lock);
	pm_iir = fetch_and_zero(&rps->pm_iir) & rps->pm_events;
	client_boost = atomic_read(&rps->num_waiters);
	spin_unlock_irq(gt->irq_lock);

	/* Make sure we didn't queue anything we're not going to process. */
	if (!pm_iir && !client_boost)
		goto out;

	mutex_lock(&rps->lock);
	if (!intel_rps_is_active(rps)) {
		mutex_unlock(&rps->lock);
		return;
	}

	pm_iir |= vlv_wa_c0_ei(rps, pm_iir);

	adj = rps->last_adj;
	new_freq = rps->cur_freq;
	min = rps->min_freq_softlimit;
	max = rps->max_freq_softlimit;
	if (client_boost)
		max = rps->max_freq;

	GT_TRACE(gt,
		 "pm_iir:%x, client_boost:%s, last:%d, cur:%x, min:%x, max:%x\n",
		 pm_iir, str_yes_no(client_boost),
		 adj, new_freq, min, max);

	if (client_boost && new_freq < rps->boost_freq) {
		new_freq = rps->boost_freq;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(gt->i915) ? 2 : 1;

		if (new_freq >= rps->max_freq_softlimit)
			adj = 0;
	} else if (client_boost) {
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (rps->cur_freq > rps->efficient_freq)
			new_freq = rps->efficient_freq;
		else if (rps->cur_freq > rps->min_freq_softlimit)
			new_freq = rps->min_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(gt->i915) ? -2 : -1;

		if (new_freq <= rps->min_freq_softlimit)
			adj = 0;
	} else { /* unknown event */
		adj = 0;
	}

	/*
	 * sysfs frequency limits may have snuck in while
	 * servicing the interrupt
	 */
	new_freq += adj;
	new_freq = clamp_t(int, new_freq, min, max);

	if (intel_rps_set(rps, new_freq)) {
		drm_dbg(&i915->drm, "Failed to set new GPU frequency\n");
		adj = 0;
	}
	rps->last_adj = adj;

	mutex_unlock(&rps->lock);

out:
	spin_lock_irq(gt->irq_lock);
	gen6_gt_pm_unmask_irq(gt, rps->pm_events);
	spin_unlock_irq(gt->irq_lock);
}
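
/*
 * Note the ramp above: consecutive events in the same direction double adj
 * (1, 2, 4, ... or -1, -2, -4, ...), while a boost, a limit hit or a
 * direction change resets it, giving an exponential approach to the target
 * frequency.
 */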

void gen11_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
{
	struct intel_gt *gt = rps_to_gt(rps);
	const u32 events = rps->pm_events & pm_iir;

	lockdep_assert_held(gt->irq_lock);

	if (unlikely(!events))
		return;

	GT_TRACE(gt, "irq events:%x\n", events);

	gen6_gt_pm_mask_irq(gt, events);

	rps->pm_iir |= events;
	queue_work(gt->i915->unordered_wq, &rps->work);
}

void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
{
	struct intel_gt *gt = rps_to_gt(rps);
	u32 events;

	events = pm_iir & rps->pm_events;
	if (events) {
		spin_lock(gt->irq_lock);

		GT_TRACE(gt, "irq events:%x\n", events);

		gen6_gt_pm_mask_irq(gt, events);
		rps->pm_iir |= events;

		queue_work(gt->i915->unordered_wq, &rps->work);
		spin_unlock(gt->irq_lock);
	}

	if (GRAPHICS_VER(gt->i915) >= 8)
		return;

	if (pm_iir & PM_VEBOX_USER_INTERRUPT)
		intel_engine_cs_irq(gt->engine[VECS0], pm_iir >> 10);

	if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
		drm_dbg(&rps_to_i915(rps)->drm,
			"Command parser error, pm_iir 0x%08x\n", pm_iir);
}

void gen5_rps_irq_handler(struct intel_rps *rps)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_freq;

	spin_lock(&mchdev_lock);

	/* Ack all pending MEM interrupts, then ack the eval-change event */
	intel_uncore_write16(uncore,
			     MEMINTRSTS,
			     intel_uncore_read(uncore, MEMINTRSTS));

	intel_uncore_write16(uncore, MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = intel_uncore_read(uncore, RCPREVBSYTUPAVG);
	busy_down = intel_uncore_read(uncore, RCPREVBSYTDNAVG);
	max_avg = intel_uncore_read(uncore, RCBMAXAVG);
	min_avg = intel_uncore_read(uncore, RCBMINAVG);

	/* Handle RCS change request from hw */
	new_freq = rps->cur_freq;
	if (busy_up > max_avg)
		new_freq++;
	else if (busy_down < min_avg)
		new_freq--;
	new_freq = clamp(new_freq,
			 rps->min_freq_softlimit,
			 rps->max_freq_softlimit);

	if (new_freq != rps->cur_freq && !__gen5_rps_set(rps, new_freq))
		rps->cur_freq = new_freq;

	spin_unlock(&mchdev_lock);
}

void intel_rps_init_early(struct intel_rps *rps)
{
	mutex_init(&rps->lock);
	mutex_init(&rps->power.mutex);

	INIT_WORK(&rps->work, rps_work);
	timer_setup(&rps->timer, rps_timer, 0);

	atomic_set(&rps->num_waiters, 0);
}

void intel_rps_init(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);

	if (rps_uses_slpc(rps))
		return;

	if (IS_CHERRYVIEW(i915))
		chv_rps_init(rps);
	else if (IS_VALLEYVIEW(i915))
		vlv_rps_init(rps);
	else if (GRAPHICS_VER(i915) >= 6)
		gen6_rps_init(rps);
	else if (IS_IRONLAKE_M(i915))
		gen5_rps_init(rps);

	/* Derive initial user preferences/limits from the hardware limits */
	rps->max_freq_softlimit = rps->max_freq;
	rps_to_gt(rps)->defaults.max_freq = rps->max_freq_softlimit;
	rps->min_freq_softlimit = rps->min_freq;
	rps_to_gt(rps)->defaults.min_freq = rps->min_freq_softlimit;

	/* After setting max-softlimit, find the overclock max freq */
	if (GRAPHICS_VER(i915) == 6 || IS_IVYBRIDGE(i915) || IS_HASWELL(i915)) {
		u32 params = 0;

		snb_pcode_read(rps_to_gt(rps)->uncore, GEN6_READ_OC_PARAMS, &params, NULL);
		if (params & BIT(31)) { /* OC supported */
			drm_dbg(&i915->drm,
				"Overclocking supported, max: %dMHz, overclock: %dMHz\n",
				(rps->max_freq & 0xff) * 50,
				(params & 0xff) * 50);
			rps->max_freq = params & 0xff;
		}
	}

	/* Set default thresholds in % */
	rps->power.up_threshold = 95;
	rps_to_gt(rps)->defaults.rps_up_threshold = rps->power.up_threshold;
	rps->power.down_threshold = 85;
	rps_to_gt(rps)->defaults.rps_down_threshold = rps->power.down_threshold;

	/* Finally allow us to boost to max by default */
	rps->boost_freq = rps->max_freq;
	rps->idle_freq = rps->min_freq;

	/* Start in the middle, from here we will autotune based on workload */
	rps->cur_freq = rps->efficient_freq;

	rps->pm_intrmsk_mbz = 0;

	/*
	 * SNB, IVB and HSW can hard hang, while VLV and CHV may hard hang,
	 * on a looping batchbuffer if GEN6_PM_UP_EI_EXPIRED is masked.
	 *
	 * TODO: verify if this can be reproduced on VLV,CHV.
	 */
	if (GRAPHICS_VER(i915) <= 7)
		rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;

	if (GRAPHICS_VER(i915) >= 8 && GRAPHICS_VER(i915) < 11)
		rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;

	/* GuC needs ARAT expired interrupt unmasked */
	if (intel_uc_uses_guc_submission(&rps_to_gt(rps)->uc))
		rps->pm_intrmsk_mbz |= ARAT_EXPIRED_INTRMSK;
}
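
/*
 * Worked example for the OC probe in intel_rps_init() (register values
 * are illustrative, not from real hardware): a GEN6_READ_OC_PARAMS reply
 * of 0x80000019 has BIT(31) set, so overclocking is supported, and the
 * low byte gives an overclock ceiling of 0x19 * 50 = 1250 MHz, which then
 * replaces a stock max_freq of, say, 0x16 (1100 MHz).
 */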

void intel_rps_sanitize(struct intel_rps *rps)
{
	if (rps_uses_slpc(rps))
		return;

	if (GRAPHICS_VER(rps_to_i915(rps)) >= 6)
		rps_disable_interrupts(rps);
}

u32 intel_rps_read_rpstat(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	i915_reg_t rpstat;

	rpstat = (GRAPHICS_VER(i915) >= 12) ? GEN12_RPSTAT1 : GEN6_RPSTAT1;

	return intel_uncore_read(rps_to_gt(rps)->uncore, rpstat);
}

static u32 intel_rps_get_cagf(struct intel_rps *rps, u32 rpstat)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 cagf;

	if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70))
		cagf = REG_FIELD_GET(MTL_CAGF_MASK, rpstat);
	else if (GRAPHICS_VER(i915) >= 12)
		cagf = REG_FIELD_GET(GEN12_CAGF_MASK, rpstat);
	else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
		cagf = REG_FIELD_GET(RPE_MASK, rpstat);
	else if (GRAPHICS_VER(i915) >= 9)
		cagf = REG_FIELD_GET(GEN9_CAGF_MASK, rpstat);
	else if (IS_HASWELL(i915) || IS_BROADWELL(i915))
		cagf = REG_FIELD_GET(HSW_CAGF_MASK, rpstat);
	else if (GRAPHICS_VER(i915) >= 6)
		cagf = REG_FIELD_GET(GEN6_CAGF_MASK, rpstat);
	else
		cagf = gen5_invert_freq(rps, REG_FIELD_GET(MEMSTAT_PSTATE_MASK, rpstat));

	return cagf;
}

static u32 __read_cagf(struct intel_rps *rps, bool take_fw)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	struct intel_uncore *uncore = rps_to_uncore(rps);
	i915_reg_t r = INVALID_MMIO_REG;
	u32 freq;

	/*
	 * For Gen12+ reading freq from HW does not need a forcewake and
	 * registers will return 0 freq when GT is in RC6
	 */
	if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70)) {
		r = MTL_MIRROR_TARGET_WP1;
	} else if (GRAPHICS_VER(i915) >= 12) {
		r = GEN12_RPSTAT1;
	} else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
		vlv_iosf_sb_get(&i915->drm, BIT(VLV_IOSF_SB_PUNIT));
		freq = vlv_iosf_sb_read(&i915->drm, VLV_IOSF_SB_PUNIT, PUNIT_REG_GPU_FREQ_STS);
		vlv_iosf_sb_put(&i915->drm, BIT(VLV_IOSF_SB_PUNIT));
	} else if (GRAPHICS_VER(i915) >= 6) {
		r = GEN6_RPSTAT1;
	} else {
		r = MEMSTAT_ILK;
	}

	if (i915_mmio_reg_valid(r))
		freq = take_fw ?
		       intel_uncore_read(uncore, r) :
		       intel_uncore_read_fw(uncore, r);

	return intel_rps_get_cagf(rps, freq);
}

static u32 read_cagf(struct intel_rps *rps)
{
	return __read_cagf(rps, true);
}

u32 intel_rps_read_actual_frequency(struct intel_rps *rps)
{
	struct intel_runtime_pm *rpm = rps_to_uncore(rps)->rpm;
	intel_wakeref_t wakeref;
	u32 freq = 0;

	with_intel_runtime_pm_if_in_use(rpm, wakeref)
		freq = intel_gpu_freq(rps, read_cagf(rps));

	return freq;
}
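
/*
 * Note the _if_in_use guard above: if the device is runtime suspended,
 * the body never runs and a frequency of 0 is reported, so a zero here
 * normally means "GPU idle / powered down" rather than an error.
 */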

u32 intel_rps_read_actual_frequency_fw(struct intel_rps *rps)
{
	return intel_gpu_freq(rps, __read_cagf(rps, false));
}

static u32 intel_rps_read_punit_req(struct intel_rps *rps)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);
	struct intel_runtime_pm *rpm = rps_to_uncore(rps)->rpm;
	intel_wakeref_t wakeref;
	u32 freq = 0;

	with_intel_runtime_pm_if_in_use(rpm, wakeref)
		freq = intel_uncore_read(uncore, GEN6_RPNSWREQ);

	return freq;
}

static u32 intel_rps_get_req(u32 pureq)
{
	u32 req = pureq >> GEN9_SW_REQ_UNSLICE_RATIO_SHIFT;

	return req;
}

u32 intel_rps_read_punit_req_frequency(struct intel_rps *rps)
{
	u32 freq = intel_rps_get_req(intel_rps_read_punit_req(rps));

	return intel_gpu_freq(rps, freq);
}

u32 intel_rps_get_requested_frequency(struct intel_rps *rps)
{
	if (rps_uses_slpc(rps))
		return intel_rps_read_punit_req_frequency(rps);
	else
		return intel_gpu_freq(rps, rps->cur_freq);
}

u32 intel_rps_get_max_frequency(struct intel_rps *rps)
{
	struct intel_guc_slpc *slpc = rps_to_slpc(rps);

	if (rps_uses_slpc(rps))
		return slpc->max_freq_softlimit;
	else
		return intel_gpu_freq(rps, rps->max_freq_softlimit);
}

/**
 * intel_rps_get_max_raw_freq - returns the max frequency in raw units
 * @rps: the intel_rps structure
 *
 * Returns the max frequency in the platform's raw encoding rather than
 * in MHz. On newer platforms the raw encoding is in units of 50 MHz.
 */
u32 intel_rps_get_max_raw_freq(struct intel_rps *rps)
{
	struct intel_guc_slpc *slpc = rps_to_slpc(rps);
	u32 freq;

	if (rps_uses_slpc(rps)) {
		return DIV_ROUND_CLOSEST(slpc->rp0_freq,
					 GT_FREQUENCY_MULTIPLIER);
	} else {
		freq = rps->max_freq;
		if (GRAPHICS_VER(rps_to_i915(rps)) >= 9) {
			/* Convert GT frequency to 50 MHz units */
			freq /= GEN9_FREQ_SCALER;
		}
		return freq;
	}
}
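
/*
 * Worked example for the raw conversion above (numbers illustrative): on
 * the SLPC path rp0_freq is kept in MHz, so 1100 MHz becomes
 * DIV_ROUND_CLOSEST(1100, GT_FREQUENCY_MULTIPLIER) = 22 raw units of
 * 50 MHz each. On the legacy Gen9+ path, max_freq is stored in finer
 * steps and dividing by GEN9_FREQ_SCALER lands in the same 50 MHz units.
 */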

u32 intel_rps_get_rp0_frequency(struct intel_rps *rps)
{
	struct intel_guc_slpc *slpc = rps_to_slpc(rps);

	if (rps_uses_slpc(rps))
		return slpc->rp0_freq;
	else
		return intel_gpu_freq(rps, rps->rp0_freq);
}

u32 intel_rps_get_rp1_frequency(struct intel_rps *rps)
{
	struct intel_guc_slpc *slpc = rps_to_slpc(rps);

	if (rps_uses_slpc(rps))
		return slpc->rp1_freq;
	else
		return intel_gpu_freq(rps, rps->rp1_freq);
}

u32 intel_rps_get_rpn_frequency(struct intel_rps *rps)
{
	struct intel_guc_slpc *slpc = rps_to_slpc(rps);

	if (rps_uses_slpc(rps))
		return slpc->min_freq;
	else
		return intel_gpu_freq(rps, rps->min_freq);
}
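
/*
 * For reference when reading the dumps below: RP0 is the highest
 * non-overclocked frequency, RP1 the nominal/efficient frequency and RPn
 * the lowest supported frequency, matching the "Max non-overclocked",
 * "Nominal" and "Lowest" lines printed there.
 */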

static void rps_frequency_dump(struct intel_rps *rps, struct drm_printer *p)
{
	struct intel_gt *gt = rps_to_gt(rps);
	struct drm_i915_private *i915 = gt->i915;
	struct intel_uncore *uncore = gt->uncore;
	struct intel_rps_freq_caps caps;
	u32 rp_state_limits;
	u32 gt_perf_status;
	u32 rpmodectl, rpinclimit, rpdeclimit;
	u32 rpstat, cagf, reqf;
	u32 rpcurupei, rpcurup, rpprevup;
	u32 rpcurdownei, rpcurdown, rpprevdown;
	u32 rpupei, rpupt, rpdownei, rpdownt;
	u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;

	rp_state_limits = intel_uncore_read(uncore, GEN6_RP_STATE_LIMITS);
	gen6_rps_get_freq_caps(rps, &caps);
	if (IS_GEN9_LP(i915))
		gt_perf_status = intel_uncore_read(uncore, BXT_GT_PERF_STATUS);
	else
		gt_perf_status = intel_uncore_read(uncore, GEN6_GT_PERF_STATUS);

	/* RPSTAT1 is in the GT power well */
	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);

	reqf = intel_uncore_read(uncore, GEN6_RPNSWREQ);
	if (GRAPHICS_VER(i915) >= 9) {
		reqf >>= 23;
	} else {
		reqf &= ~GEN6_TURBO_DISABLE;
		if (IS_HASWELL(i915) || IS_BROADWELL(i915))
			reqf >>= 24;
		else
			reqf >>= 25;
	}
	reqf = intel_gpu_freq(rps, reqf);

	rpmodectl = intel_uncore_read(uncore, GEN6_RP_CONTROL);
	rpinclimit = intel_uncore_read(uncore, GEN6_RP_UP_THRESHOLD);
	rpdeclimit = intel_uncore_read(uncore, GEN6_RP_DOWN_THRESHOLD);

	rpstat = intel_rps_read_rpstat(rps);
	rpcurupei = intel_uncore_read(uncore, GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
	rpcurup = intel_uncore_read(uncore, GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
	rpprevup = intel_uncore_read(uncore, GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
	rpcurdownei = intel_uncore_read(uncore, GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
	rpcurdown = intel_uncore_read(uncore, GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
	rpprevdown = intel_uncore_read(uncore, GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;

	rpupei = intel_uncore_read(uncore, GEN6_RP_UP_EI);
	rpupt = intel_uncore_read(uncore, GEN6_RP_UP_THRESHOLD);

	rpdownei = intel_uncore_read(uncore, GEN6_RP_DOWN_EI);
	rpdownt = intel_uncore_read(uncore, GEN6_RP_DOWN_THRESHOLD);

	cagf = intel_rps_read_actual_frequency(rps);

	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);

	if (GRAPHICS_VER(i915) >= 11) {
		pm_ier = intel_uncore_read(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE);
		pm_imr = intel_uncore_read(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK);
		/*
		 * The equivalent to the PM ISR & IIR cannot be read
		 * without affecting the current state of the system
		 */
		pm_isr = 0;
		pm_iir = 0;
	} else if (GRAPHICS_VER(i915) >= 8) {
		pm_ier = intel_uncore_read(uncore, GEN8_GT_IER(2));
		pm_imr = intel_uncore_read(uncore, GEN8_GT_IMR(2));
		pm_isr = intel_uncore_read(uncore, GEN8_GT_ISR(2));
		pm_iir = intel_uncore_read(uncore, GEN8_GT_IIR(2));
	} else {
		pm_ier = intel_uncore_read(uncore, GEN6_PMIER);
		pm_imr = intel_uncore_read(uncore, GEN6_PMIMR);
		pm_isr = intel_uncore_read(uncore, GEN6_PMISR);
		pm_iir = intel_uncore_read(uncore, GEN6_PMIIR);
	}
	pm_mask = intel_uncore_read(uncore, GEN6_PMINTRMSK);

	drm_printf(p, "Video Turbo Mode: %s\n",
		   str_yes_no(rpmodectl & GEN6_RP_MEDIA_TURBO));
	drm_printf(p, "HW control enabled: %s\n",
		   str_yes_no(rpmodectl & GEN6_RP_ENABLE));
	drm_printf(p, "SW control enabled: %s\n",
		   str_yes_no((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) == GEN6_RP_MEDIA_SW_MODE));

	drm_printf(p, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
		   pm_ier, pm_imr, pm_mask);
	if (GRAPHICS_VER(i915) <= 10)
		drm_printf(p, "PM ISR=0x%08x IIR=0x%08x\n",
			   pm_isr, pm_iir);
	drm_printf(p, "pm_intrmsk_mbz: 0x%08x\n",
		   rps->pm_intrmsk_mbz);
	drm_printf(p, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
	drm_printf(p, "Render p-state ratio: %d\n",
		   (gt_perf_status & (GRAPHICS_VER(i915) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
	drm_printf(p, "Render p-state VID: %d\n",
		   gt_perf_status & 0xff);
	drm_printf(p, "Render p-state limit: %d\n",
		   rp_state_limits & 0xff);
	drm_printf(p, "RPSTAT1: 0x%08x\n", rpstat);
	drm_printf(p, "RPMODECTL: 0x%08x\n", rpmodectl);
	drm_printf(p, "RPINCLIMIT: 0x%08x\n", rpinclimit);
	drm_printf(p, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
	drm_printf(p, "RPNSWREQ: %dMHz\n", reqf);
	drm_printf(p, "CAGF: %dMHz\n", cagf);
	drm_printf(p, "RP CUR UP EI: %d (%lldns)\n",
		   rpcurupei,
		   intel_gt_pm_interval_to_ns(gt, rpcurupei));
	drm_printf(p, "RP CUR UP: %d (%lldns)\n",
		   rpcurup, intel_gt_pm_interval_to_ns(gt, rpcurup));
	drm_printf(p, "RP PREV UP: %d (%lldns)\n",
		   rpprevup, intel_gt_pm_interval_to_ns(gt, rpprevup));
	drm_printf(p, "Up threshold: %d%%\n",
		   rps->power.up_threshold);
	drm_printf(p, "RP UP EI: %d (%lldns)\n",
		   rpupei, intel_gt_pm_interval_to_ns(gt, rpupei));
	drm_printf(p, "RP UP THRESHOLD: %d (%lldns)\n",
		   rpupt, intel_gt_pm_interval_to_ns(gt, rpupt));

	drm_printf(p, "RP CUR DOWN EI: %d (%lldns)\n",
		   rpcurdownei,
		   intel_gt_pm_interval_to_ns(gt, rpcurdownei));
	drm_printf(p, "RP CUR DOWN: %d (%lldns)\n",
		   rpcurdown,
		   intel_gt_pm_interval_to_ns(gt, rpcurdown));
	drm_printf(p, "RP PREV DOWN: %d (%lldns)\n",
		   rpprevdown,
		   intel_gt_pm_interval_to_ns(gt, rpprevdown));
	drm_printf(p, "Down threshold: %d%%\n",
		   rps->power.down_threshold);
	drm_printf(p, "RP DOWN EI: %d (%lldns)\n",
		   rpdownei, intel_gt_pm_interval_to_ns(gt, rpdownei));
	drm_printf(p, "RP DOWN THRESHOLD: %d (%lldns)\n",
		   rpdownt, intel_gt_pm_interval_to_ns(gt, rpdownt));

	drm_printf(p, "Lowest (RPN) frequency: %dMHz\n",
		   intel_gpu_freq(rps, caps.min_freq));
	drm_printf(p, "Nominal (RP1) frequency: %dMHz\n",
		   intel_gpu_freq(rps, caps.rp1_freq));
	drm_printf(p, "Max non-overclocked (RP0) frequency: %dMHz\n",
		   intel_gpu_freq(rps, caps.rp0_freq));
	drm_printf(p, "Max overclocked frequency: %dMHz\n",
		   intel_gpu_freq(rps, rps->max_freq));

	drm_printf(p, "Current freq: %d MHz\n",
		   intel_gpu_freq(rps, rps->cur_freq));
	drm_printf(p, "Actual freq: %d MHz\n", cagf);
	drm_printf(p, "Idle freq: %d MHz\n",
		   intel_gpu_freq(rps, rps->idle_freq));
	drm_printf(p, "Min freq: %d MHz\n",
		   intel_gpu_freq(rps, rps->min_freq));
	drm_printf(p, "Boost freq: %d MHz\n",
		   intel_gpu_freq(rps, rps->boost_freq));
	drm_printf(p, "Max freq: %d MHz\n",
		   intel_gpu_freq(rps, rps->max_freq));
	drm_printf(p,
		   "efficient (RPe) frequency: %d MHz\n",
		   intel_gpu_freq(rps, rps->efficient_freq));
}

static void slpc_frequency_dump(struct intel_rps *rps, struct drm_printer *p)
{
	struct intel_gt *gt = rps_to_gt(rps);
	struct intel_uncore *uncore = gt->uncore;
	struct intel_rps_freq_caps caps;
	u32 pm_mask;

	gen6_rps_get_freq_caps(rps, &caps);
	pm_mask = intel_uncore_read(uncore, GEN6_PMINTRMSK);

	drm_printf(p, "PM MASK=0x%08x\n", pm_mask);
	drm_printf(p, "pm_intrmsk_mbz: 0x%08x\n",
		   rps->pm_intrmsk_mbz);
	drm_printf(p, "RPSTAT1: 0x%08x\n", intel_rps_read_rpstat(rps));
	drm_printf(p, "RPNSWREQ: %dMHz\n", intel_rps_get_requested_frequency(rps));
	drm_printf(p, "Lowest (RPN) frequency: %dMHz\n",
		   intel_gpu_freq(rps, caps.min_freq));
	drm_printf(p, "Nominal (RP1) frequency: %dMHz\n",
		   intel_gpu_freq(rps, caps.rp1_freq));
	drm_printf(p, "Max non-overclocked (RP0) frequency: %dMHz\n",
		   intel_gpu_freq(rps, caps.rp0_freq));
	drm_printf(p, "Current freq: %d MHz\n",
		   intel_rps_get_requested_frequency(rps));
	drm_printf(p, "Actual freq: %d MHz\n",
		   intel_rps_read_actual_frequency(rps));
	drm_printf(p, "Min freq: %d MHz\n",
		   intel_rps_get_min_frequency(rps));
	drm_printf(p, "Boost freq: %d MHz\n",
		   intel_rps_get_boost_frequency(rps));
	drm_printf(p, "Max freq: %d MHz\n",
		   intel_rps_get_max_frequency(rps));
	drm_printf(p,
		   "efficient (RPe) frequency: %d MHz\n",
		   intel_gpu_freq(rps, caps.rp1_freq));
}

void gen6_rps_frequency_dump(struct intel_rps *rps, struct drm_printer *p)
{
	if (rps_uses_slpc(rps))
		return slpc_frequency_dump(rps, p);
	else
		return rps_frequency_dump(rps, p);
}
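
/*
 * A minimal sketch of driving the dump above from driver-internal code
 * (illustrative only; callers normally wire this up to a debugfs file):
 *
 *	struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
 *
 *	gen6_rps_frequency_dump(&gt->rps, &p);
 */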

static int set_max_freq(struct intel_rps *rps, u32 val)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	int ret = 0;

	mutex_lock(&rps->lock);

	val = intel_freq_opcode(rps, val);
	if (val < rps->min_freq ||
	    val > rps->max_freq ||
	    val < rps->min_freq_softlimit) {
		ret = -EINVAL;
		goto unlock;
	}

	if (val > rps->rp0_freq)
		drm_dbg(&i915->drm, "User requested overclocking to %d\n",
			intel_gpu_freq(rps, val));

	rps->max_freq_softlimit = val;

	val = clamp_t(int, rps->cur_freq,
		      rps->min_freq_softlimit,
		      rps->max_freq_softlimit);

	/*
	 * We still need *_set_rps to process the new max_delay and
	 * update the interrupt limits and PMINTRMSK even though the
	 * frequency request may be unchanged.
	 */
	intel_rps_set(rps, val);

unlock:
	mutex_unlock(&rps->lock);

	return ret;
}

int intel_rps_set_max_frequency(struct intel_rps *rps, u32 val)
{
	struct intel_guc_slpc *slpc = rps_to_slpc(rps);

	if (rps_uses_slpc(rps))
		return intel_guc_slpc_set_max_freq(slpc, val);
	else
		return set_max_freq(rps, val);
}
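
/*
 * Usage sketch (values illustrative): a sysfs write such as
 * "echo 1100 > gt_max_freq_mhz" reaches intel_rps_set_max_frequency()
 * with val in MHz; on the legacy path intel_freq_opcode() translates
 * that to the platform encoding before it is validated against the hard
 * limits and the min softlimit, returning -EINVAL if out of range.
 */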

u32 intel_rps_get_min_frequency(struct intel_rps *rps)
{
	struct intel_guc_slpc *slpc = rps_to_slpc(rps);

	if (rps_uses_slpc(rps))
		return slpc->min_freq_softlimit;
	else
		return intel_gpu_freq(rps, rps->min_freq_softlimit);
}

/**
 * intel_rps_get_min_raw_freq - returns the min frequency in raw units
 * @rps: the intel_rps structure
 *
 * Returns the min frequency in the platform's raw encoding rather than
 * in MHz. On newer platforms the raw encoding is in units of 50 MHz.
 */
u32 intel_rps_get_min_raw_freq(struct intel_rps *rps)
{
	struct intel_guc_slpc *slpc = rps_to_slpc(rps);
	u32 freq;

	if (rps_uses_slpc(rps)) {
		return DIV_ROUND_CLOSEST(slpc->min_freq,
					 GT_FREQUENCY_MULTIPLIER);
	} else {
		freq = rps->min_freq;
		if (GRAPHICS_VER(rps_to_i915(rps)) >= 9) {
			/* Convert GT frequency to 50 MHz units */
			freq /= GEN9_FREQ_SCALER;
		}
		return freq;
	}
}

static int set_min_freq(struct intel_rps *rps, u32 val)
{
	int ret = 0;

	mutex_lock(&rps->lock);

	val = intel_freq_opcode(rps, val);
	if (val < rps->min_freq ||
	    val > rps->max_freq ||
	    val > rps->max_freq_softlimit) {
		ret = -EINVAL;
		goto unlock;
	}

	rps->min_freq_softlimit = val;

	val = clamp_t(int, rps->cur_freq,
		      rps->min_freq_softlimit,
		      rps->max_freq_softlimit);

	/*
	 * We still need *_set_rps to process the new min_delay and
	 * update the interrupt limits and PMINTRMSK even though the
	 * frequency request may be unchanged.
	 */
	intel_rps_set(rps, val);

unlock:
	mutex_unlock(&rps->lock);

	return ret;
}

int intel_rps_set_min_frequency(struct intel_rps *rps, u32 val)
{
	struct intel_guc_slpc *slpc = rps_to_slpc(rps);

	if (rps_uses_slpc(rps))
		return intel_guc_slpc_set_min_freq(slpc, val);
	else
		return set_min_freq(rps, val);
}

u8 intel_rps_get_up_threshold(struct intel_rps *rps)
{
	return rps->power.up_threshold;
}

static int rps_set_threshold(struct intel_rps *rps, u8 *threshold, u8 val)
{
	int ret;

	if (val > 100)
		return -EINVAL;

	ret = mutex_lock_interruptible(&rps->lock);
	if (ret)
		return ret;

	if (*threshold == val)
		goto out_unlock;

	*threshold = val;

	/* Force reset. */
	rps->last_freq = -1;
	mutex_lock(&rps->power.mutex);
	rps->power.mode = -1;
	mutex_unlock(&rps->power.mutex);

	intel_rps_set(rps, clamp(rps->cur_freq,
				 rps->min_freq_softlimit,
				 rps->max_freq_softlimit));

out_unlock:
	mutex_unlock(&rps->lock);

	return ret;
}

int intel_rps_set_up_threshold(struct intel_rps *rps, u8 threshold)
{
	return rps_set_threshold(rps, &rps->power.up_threshold, threshold);
}

u8 intel_rps_get_down_threshold(struct intel_rps *rps)
{
	return rps->power.down_threshold;
}

int intel_rps_set_down_threshold(struct intel_rps *rps, u8 threshold)
{
	return rps_set_threshold(rps, &rps->power.down_threshold, threshold);
}
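
/*
 * Example for the threshold setters above (percentages illustrative):
 * intel_rps_set_up_threshold(rps, 90) tightens the up-ramp trigger from
 * the default 95% busyness, while any value above 100 is rejected with
 * -EINVAL. Invalidating last_freq and power.mode forces the next
 * intel_rps_set() to reprogram the evaluation parameters even when the
 * frequency itself is unchanged.
 */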

static void intel_rps_set_manual(struct intel_rps *rps, bool enable)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);
	u32 state = enable ? GEN9_RPSWCTL_ENABLE : GEN9_RPSWCTL_DISABLE;

	/* Allow punit to process software requests */
	intel_uncore_write(uncore, GEN6_RP_CONTROL, state);
}

void intel_rps_raise_unslice(struct intel_rps *rps)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);

	mutex_lock(&rps->lock);

	if (rps_uses_slpc(rps)) {
		/* RP limits have not been initialized yet for SLPC path */
		struct intel_rps_freq_caps caps;

		gen6_rps_get_freq_caps(rps, &caps);

		intel_rps_set_manual(rps, true);
		intel_uncore_write(uncore, GEN6_RPNSWREQ,
				   ((caps.rp0_freq <<
				     GEN9_SW_REQ_UNSLICE_RATIO_SHIFT) |
				    GEN9_IGNORE_SLICE_RATIO));
		intel_rps_set_manual(rps, false);
	} else {
		intel_rps_set(rps, rps->rp0_freq);
	}

	mutex_unlock(&rps->lock);
}

void intel_rps_lower_unslice(struct intel_rps *rps)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);

	mutex_lock(&rps->lock);

	if (rps_uses_slpc(rps)) {
		/* RP limits have not been initialized yet for SLPC path */
		struct intel_rps_freq_caps caps;

		gen6_rps_get_freq_caps(rps, &caps);

		intel_rps_set_manual(rps, true);
		intel_uncore_write(uncore, GEN6_RPNSWREQ,
				   ((caps.min_freq <<
				     GEN9_SW_REQ_UNSLICE_RATIO_SHIFT) |
				    GEN9_IGNORE_SLICE_RATIO));
		intel_rps_set_manual(rps, false);
	} else {
		intel_rps_set(rps, rps->min_freq);
	}

	mutex_unlock(&rps->lock);
}
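
/*
 * Both helpers above briefly flip GEN6_RP_CONTROL into software mode so
 * the punit honours the GEN6_RPNSWREQ write directly, then hand control
 * back. The request packs the ratio into the unslice field and sets
 * GEN9_IGNORE_SLICE_RATIO so the slice ratio request is ignored.
 */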

static u32 rps_read_mmio(struct intel_rps *rps, i915_reg_t reg32)
{
	struct intel_gt *gt = rps_to_gt(rps);
	intel_wakeref_t wakeref;
	u32 val;

	with_intel_runtime_pm(gt->uncore->rpm, wakeref)
		val = intel_uncore_read(gt->uncore, reg32);

	return val;
}

bool rps_read_mask_mmio(struct intel_rps *rps,
			i915_reg_t reg32, u32 mask)
{
	return rps_read_mmio(rps, reg32) & mask;
}

/* External interface for intel_ips.ko */

static struct drm_i915_private __rcu *ips_mchdev;

/*
 * Tells the intel_ips driver that the i915 driver is now loaded, if
 * IPS got loaded first.
 *
 * This awkward dance is so that neither module has to depend on the
 * other in order for IPS to do the appropriate communication of
 * GPU turbo limits to i915.
 */
static void
ips_ping_for_i915_load(void)
{
	void (*link)(void);

	link = symbol_get(ips_link_to_i915_driver);
	if (link) {
		link();
		symbol_put(ips_link_to_i915_driver);
	}
}

void intel_rps_driver_register(struct intel_rps *rps)
{
	struct intel_gt *gt = rps_to_gt(rps);

	/*
	 * We only register the i915 ips part with intel-ips once everything is
	 * set up, to avoid intel-ips sneaking in and reading bogus values.
	 */
	if (GRAPHICS_VER(gt->i915) == 5) {
		GEM_BUG_ON(ips_mchdev);
		rcu_assign_pointer(ips_mchdev, gt->i915);
		ips_ping_for_i915_load();
	}
}

void intel_rps_driver_unregister(struct intel_rps *rps)
{
	if (rcu_access_pointer(ips_mchdev) == rps_to_i915(rps))
		rcu_assign_pointer(ips_mchdev, NULL);
}

static struct drm_i915_private *mchdev_get(void)
{
	struct drm_i915_private *i915;

	rcu_read_lock();
	i915 = rcu_dereference(ips_mchdev);
	if (i915 && !kref_get_unless_zero(&i915->drm.ref))
		i915 = NULL;
	rcu_read_unlock();

	return i915;
}
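
/*
 * mchdev_get() pairs rcu_dereference() with kref_get_unless_zero() so a
 * caller racing against driver unload either gets a reference that keeps
 * the device alive or cleanly sees NULL; every successful caller below
 * balances it with drm_dev_put().
 */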

/**
 * i915_read_mch_val - return value for IPS use
 *
 * Calculate and return a value for the IPS driver to use when deciding whether
 * we have thermal and power headroom to increase CPU or GPU power budget.
 */
unsigned long i915_read_mch_val(void)
{
	struct drm_i915_private *i915;
	unsigned long chipset_val = 0;
	unsigned long graphics_val = 0;
	intel_wakeref_t wakeref;

	i915 = mchdev_get();
	if (!i915)
		return 0;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		struct intel_ips *ips = &to_gt(i915)->rps.ips;

		spin_lock_irq(&mchdev_lock);
		chipset_val = __ips_chipset_val(ips);
		graphics_val = __ips_gfx_val(ips);
		spin_unlock_irq(&mchdev_lock);
	}

	drm_dev_put(&i915->drm);
	return chipset_val + graphics_val;
}
EXPORT_SYMBOL_GPL(i915_read_mch_val);

/**
 * i915_gpu_raise - raise GPU frequency limit
 *
 * Raise the limit; IPS indicates we have thermal headroom.
 */
bool i915_gpu_raise(void)
{
	struct drm_i915_private *i915;
	struct intel_rps *rps;

	i915 = mchdev_get();
	if (!i915)
		return false;

	rps = &to_gt(i915)->rps;

	spin_lock_irq(&mchdev_lock);
	if (rps->max_freq_softlimit < rps->max_freq)
		rps->max_freq_softlimit++;
	spin_unlock_irq(&mchdev_lock);

	drm_dev_put(&i915->drm);
	return true;
}
EXPORT_SYMBOL_GPL(i915_gpu_raise);

/**
 * i915_gpu_lower - lower GPU frequency limit
 *
 * IPS indicates we're close to a thermal limit, so throttle back the GPU
 * frequency maximum.
 */
bool i915_gpu_lower(void)
{
	struct drm_i915_private *i915;
	struct intel_rps *rps;

	i915 = mchdev_get();
	if (!i915)
		return false;

	rps = &to_gt(i915)->rps;

	spin_lock_irq(&mchdev_lock);
	if (rps->max_freq_softlimit > rps->min_freq)
		rps->max_freq_softlimit--;
	spin_unlock_irq(&mchdev_lock);

	drm_dev_put(&i915->drm);
	return true;
}
EXPORT_SYMBOL_GPL(i915_gpu_lower);

/**
 * i915_gpu_busy - indicate GPU busyness to IPS
 *
 * Tell the IPS driver whether or not the GPU is busy.
 */
bool i915_gpu_busy(void)
{
	struct drm_i915_private *i915;
	bool ret;

	i915 = mchdev_get();
	if (!i915)
		return false;

	ret = to_gt(i915)->awake;

	drm_dev_put(&i915->drm);
	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_busy);

/**
 * i915_gpu_turbo_disable - disable graphics turbo
 *
 * Disable graphics turbo by resetting the max frequency and setting the
 * current frequency to the default.
 */
bool i915_gpu_turbo_disable(void)
{
	struct drm_i915_private *i915;
	struct intel_rps *rps;
	bool ret;

	i915 = mchdev_get();
	if (!i915)
		return false;

	rps = &to_gt(i915)->rps;

	spin_lock_irq(&mchdev_lock);
	rps->max_freq_softlimit = rps->min_freq;
	ret = !__gen5_rps_set(&to_gt(i915)->rps, rps->min_freq);
	spin_unlock_irq(&mchdev_lock);

	drm_dev_put(&i915->drm);
	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);

static void boost_if_not_started(struct dma_fence *fence)
{
	struct i915_request *rq;

	if (!dma_fence_is_i915(fence))
		return;

	rq = to_request(fence);

	if (!i915_request_started(rq))
		intel_rps_boost(rq);
}

static void mark_interactive(struct drm_device *drm, bool interactive)
{
	struct drm_i915_private *i915 = to_i915(drm);

	intel_rps_mark_interactive(&to_gt(i915)->rps, interactive);
}

static void ilk_irq_handler(struct drm_device *drm)
{
	struct drm_i915_private *i915 = to_i915(drm);

	gen5_rps_irq_handler(&to_gt(i915)->rps);
}

const struct intel_display_rps_interface i915_display_rps_interface = {
	.boost_if_not_started = boost_if_not_started,
	.mark_interactive = mark_interactive,
	.ilk_irq_handler = ilk_irq_handler,
};

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_rps.c"
#include "selftest_slpc.c"
#endif