/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "intel_gt.h"
#include "intel_gt_clock_utils.h"
#include "intel_gt_irq.h"
#include "intel_gt_pm_irq.h"
#include "intel_rps.h"
#include "intel_sideband.h"
#include "../../../platform/x86/intel_ips.h"

#define BUSY_MAX_EI	20u /* ms */

/*
 * Lock protecting IPS related data structures
 */
static DEFINE_SPINLOCK(mchdev_lock);

static struct intel_gt *rps_to_gt(struct intel_rps *rps)
{
	return container_of(rps, struct intel_gt, rps);
}

static struct drm_i915_private *rps_to_i915(struct intel_rps *rps)
{
	return rps_to_gt(rps)->i915;
}

static struct intel_uncore *rps_to_uncore(struct intel_rps *rps)
{
	return rps_to_gt(rps)->uncore;
}

static u32 rps_pm_sanitize_mask(struct intel_rps *rps, u32 mask)
{
	return mask & ~rps->pm_intrmsk_mbz;
}

static inline void set(struct intel_uncore *uncore, i915_reg_t reg, u32 val)
{
	intel_uncore_write_fw(uncore, reg, val);
}

static void rps_timer(struct timer_list *t)
{
	struct intel_rps *rps = from_timer(rps, t, timer);
	struct intel_engine_cs *engine;
	ktime_t dt, last, timestamp;
	enum intel_engine_id id;
	s64 max_busy[3] = {};

	timestamp = 0;
	for_each_engine(engine, rps_to_gt(rps), id) {
		s64 busy;
		int i;

		dt = intel_engine_get_busy_time(engine, &timestamp);
		last = engine->stats.rps;
		engine->stats.rps = dt;

		busy = ktime_to_ns(ktime_sub(dt, last));
		for (i = 0; i < ARRAY_SIZE(max_busy); i++) {
			if (busy > max_busy[i])
				swap(busy, max_busy[i]);
		}
	}
	last = rps->pm_timestamp;
	rps->pm_timestamp = timestamp;

	if (intel_rps_is_active(rps)) {
		s64 busy;
		int i;

		dt = ktime_sub(timestamp, last);

		/*
		 * Our goal is to evaluate each engine independently, so we run
		 * at the lowest clocks required to sustain the heaviest
		 * workload. However, a task may be split into sequential
		 * dependent operations across a set of engines, such that
		 * the independent contributions do not account for high load,
		 * but overall the task is GPU bound. For example, consider
		 * video decode on vcs followed by colour post-processing
		 * on vecs, followed by general post-processing on rcs.
		 * Since multiple engines being active does not, by itself,
		 * imply a single continuous workload across all engines, we
		 * hedge our bets by only contributing a factor of the
		 * distributed load into our busyness calculation.
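		 *
		 * For example, with the three busiest engines measured at
		 * 8ms, 4ms and 2ms over the interval, the loop below
		 * accumulates 8 + 4/2 + 2/4 = 10.5ms of busyness rather
		 * than the full 14ms, biasing the decision towards the
		 * heaviest single engine.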
		 */
		busy = max_busy[0];
		for (i = 1; i < ARRAY_SIZE(max_busy); i++) {
			if (!max_busy[i])
				break;

			busy += div_u64(max_busy[i], 1 << i);
		}
		GT_TRACE(rps_to_gt(rps),
			 "busy:%lld [%d%%], max:[%lld, %lld, %lld], interval:%d\n",
			 busy, (int)div64_u64(100 * busy, dt),
			 max_busy[0], max_busy[1], max_busy[2],
			 rps->pm_interval);

		if (100 * busy > rps->power.up_threshold * dt &&
		    rps->cur_freq < rps->max_freq_softlimit) {
			rps->pm_iir |= GEN6_PM_RP_UP_THRESHOLD;
			rps->pm_interval = 1;
			schedule_work(&rps->work);
		} else if (100 * busy < rps->power.down_threshold * dt &&
			   rps->cur_freq > rps->min_freq_softlimit) {
			rps->pm_iir |= GEN6_PM_RP_DOWN_THRESHOLD;
			rps->pm_interval = 1;
			schedule_work(&rps->work);
		} else {
			rps->last_adj = 0;
		}

		mod_timer(&rps->timer,
			  jiffies + msecs_to_jiffies(rps->pm_interval));
		rps->pm_interval = min(rps->pm_interval * 2, BUSY_MAX_EI);
	}
}

static void rps_start_timer(struct intel_rps *rps)
{
	rps->pm_timestamp = ktime_sub(ktime_get(), rps->pm_timestamp);
	rps->pm_interval = 1;
	mod_timer(&rps->timer, jiffies + 1);
}

static void rps_stop_timer(struct intel_rps *rps)
{
	del_timer_sync(&rps->timer);
	rps->pm_timestamp = ktime_sub(ktime_get(), rps->pm_timestamp);
	cancel_work_sync(&rps->work);
}

static u32 rps_pm_mask(struct intel_rps *rps, u8 val)
{
	u32 mask = 0;

	/* We use UP_EI_EXPIRED interrupts for both up/down in manual mode */
	if (val > rps->min_freq_softlimit)
		mask |= (GEN6_PM_RP_UP_EI_EXPIRED |
			 GEN6_PM_RP_DOWN_THRESHOLD |
			 GEN6_PM_RP_DOWN_TIMEOUT);

	if (val < rps->max_freq_softlimit)
		mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;

	mask &= rps->pm_events;

	return rps_pm_sanitize_mask(rps, ~mask);
}

static void rps_reset_ei(struct intel_rps *rps)
{
	memset(&rps->ei, 0, sizeof(rps->ei));
}

static void rps_enable_interrupts(struct intel_rps *rps)
{
	struct intel_gt *gt = rps_to_gt(rps);

	GT_TRACE(gt, "interrupts:on rps->pm_events: %x, rps_pm_mask:%x\n",
		 rps->pm_events, rps_pm_mask(rps, rps->last_freq));

	rps_reset_ei(rps);

	spin_lock_irq(&gt->irq_lock);
	gen6_gt_pm_enable_irq(gt, rps->pm_events);
	spin_unlock_irq(&gt->irq_lock);

	intel_uncore_write(gt->uncore,
			   GEN6_PMINTRMSK, rps_pm_mask(rps, rps->last_freq));
}

static void gen6_rps_reset_interrupts(struct intel_rps *rps)
{
	gen6_gt_pm_reset_iir(rps_to_gt(rps), GEN6_PM_RPS_EVENTS);
}

static void gen11_rps_reset_interrupts(struct intel_rps *rps)
{
	while (gen11_gt_reset_one_iir(rps_to_gt(rps), 0, GEN11_GTPM))
		;
}

static void rps_reset_interrupts(struct intel_rps *rps)
{
	struct intel_gt *gt = rps_to_gt(rps);

	spin_lock_irq(&gt->irq_lock);
	if (INTEL_GEN(gt->i915) >= 11)
		gen11_rps_reset_interrupts(rps);
	else
		gen6_rps_reset_interrupts(rps);

	rps->pm_iir = 0;
	spin_unlock_irq(&gt->irq_lock);
}

static void rps_disable_interrupts(struct intel_rps *rps)
{
	struct intel_gt *gt = rps_to_gt(rps);

	intel_uncore_write(gt->uncore,
			   GEN6_PMINTRMSK, rps_pm_sanitize_mask(rps, ~0u));

	spin_lock_irq(&gt->irq_lock);
	gen6_gt_pm_disable_irq(gt, GEN6_PM_RPS_EVENTS);
	spin_unlock_irq(&gt->irq_lock);

	intel_synchronize_irq(gt->i915);

	/*
	 * Now that we will not be generating any more work, flush any
	 * outstanding tasks. As we are called on the RPS idle path,
	 * we will reset the GPU to minimum frequencies, so the current
	 * state of the worker can be discarded.
	 */
	cancel_work_sync(&rps->work);

	rps_reset_interrupts(rps);
	GT_TRACE(gt, "interrupts:off\n");
}

static const struct cparams {
	u16 i;
	u16 t;
	u16 m;
	u16 c;
} cparams[] = {
	{ 1, 1333, 301, 28664 },
	{ 1, 1066, 294, 24460 },
	{ 1, 800, 294, 25192 },
	{ 0, 1333, 276, 27605 },
	{ 0, 1066, 276, 27605 },
	{ 0, 800, 231, 23784 },
};

static void gen5_rps_init(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	struct intel_uncore *uncore = rps_to_uncore(rps);
	u8 fmax, fmin, fstart;
	u32 rgvmodectl;
	int c_m, i;

	if (i915->fsb_freq <= 3200)
		c_m = 0;
	else if (i915->fsb_freq <= 4800)
		c_m = 1;
	else
		c_m = 2;

	for (i = 0; i < ARRAY_SIZE(cparams); i++) {
		if (cparams[i].i == c_m && cparams[i].t == i915->mem_freq) {
			rps->ips.m = cparams[i].m;
			rps->ips.c = cparams[i].c;
			break;
		}
	}

	rgvmodectl = intel_uncore_read(uncore, MEMMODECTL);

	/* Set up min, max, and cur for interrupt handling */
	fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
	fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
	fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
		MEMMODE_FSTART_SHIFT;
	drm_dbg(&i915->drm, "fmax: %d, fmin: %d, fstart: %d\n",
		fmax, fmin, fstart);

	/*
	 * A smaller MEMMODE bin means a higher frequency, so min/max are
	 * deliberately inverted here; gen5_rps_set() undoes the inversion
	 * when encoding a request.
	 */
	rps->min_freq = fmax;
	rps->efficient_freq = fstart;
	rps->max_freq = fmin;
}

static unsigned long
__ips_chipset_val(struct intel_ips *ips)
{
	struct intel_uncore *uncore =
		rps_to_uncore(container_of(ips, struct intel_rps, ips));
	unsigned long now = jiffies_to_msecs(jiffies), dt;
	unsigned long result;
	u64 total, delta;

	lockdep_assert_held(&mchdev_lock);

	/*
	 * Prevent division-by-zero if we are asking too fast.
	 * Also, we don't get interesting results if we are polling
	 * faster than once in 10ms, so just return the saved value
	 * in such cases.
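	 *
	 * The m and c coefficients used below were selected from the
	 * cparams table in gen5_rps_init() to match this platform's FSB
	 * and memory frequency; the resulting chipset power estimate is
	 * reported to the IPS driver via i915_read_mch_val().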
	 */
	dt = now - ips->last_time1;
	if (dt <= 10)
		return ips->chipset_power;

	/* FIXME: handle per-counter overflow */
	total = intel_uncore_read(uncore, DMIEC);
	total += intel_uncore_read(uncore, DDREC);
	total += intel_uncore_read(uncore, CSIEC);

	delta = total - ips->last_count1;

	result = div_u64(div_u64(ips->m * delta, dt) + ips->c, 10);

	ips->last_count1 = total;
	ips->last_time1 = now;

	ips->chipset_power = result;

	return result;
}

static unsigned long ips_mch_val(struct intel_uncore *uncore)
{
	unsigned int m, x, b;
	u32 tsfs;

	tsfs = intel_uncore_read(uncore, TSFS);
	x = intel_uncore_read8(uncore, TR1);

	b = tsfs & TSFS_INTR_MASK;
	m = (tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT;

	return m * x / 127 - b;
}

static int _pxvid_to_vd(u8 pxvid)
{
	if (pxvid == 0)
		return 0;

	if (pxvid >= 8 && pxvid < 31)
		pxvid = 31;

	return (pxvid + 2) * 125;
}

static u32 pvid_to_extvid(struct drm_i915_private *i915, u8 pxvid)
{
	const int vd = _pxvid_to_vd(pxvid);

	if (INTEL_INFO(i915)->is_mobile)
		return max(vd - 1125, 0);

	return vd;
}

static void __gen5_ips_update(struct intel_ips *ips)
{
	struct intel_uncore *uncore =
		rps_to_uncore(container_of(ips, struct intel_rps, ips));
	u64 now, delta, dt;
	u32 count;

	lockdep_assert_held(&mchdev_lock);

	now = ktime_get_raw_ns();
	dt = now - ips->last_time2;
	do_div(dt, NSEC_PER_MSEC);

	/* Don't divide by 0 */
	if (dt <= 10)
		return;

	count = intel_uncore_read(uncore, GFXEC);
	delta = count - ips->last_count2;

	ips->last_count2 = count;
	ips->last_time2 = now;

	/* More magic constants... */
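	/*
	 * delta is the GFXEC energy count accumulated over dt ms; the
	 * 1181 / (dt * 10) scaling is another empirically derived factor,
	 * presumably normalising the count into the units expected by
	 * __ips_gfx_val() when it adds gfx_power to its own estimate.
	 */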
	ips->gfx_power = div_u64(delta * 1181, dt * 10);
}

static void gen5_rps_update(struct intel_rps *rps)
{
	spin_lock_irq(&mchdev_lock);
	__gen5_ips_update(&rps->ips);
	spin_unlock_irq(&mchdev_lock);
}

static bool gen5_rps_set(struct intel_rps *rps, u8 val)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);
	u16 rgvswctl;

	lockdep_assert_held(&mchdev_lock);

	rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
	if (rgvswctl & MEMCTL_CMD_STS) {
		DRM_DEBUG("gpu busy, RCS change rejected\n");
		return false; /* still busy with another command */
	}

	/* Invert the frequency bin into an ips delay */
	val = rps->max_freq - val;
	val = rps->min_freq + val;

	rgvswctl =
		(MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
		(val << MEMCTL_FREQ_SHIFT) |
		MEMCTL_SFCAVM;
	intel_uncore_write16(uncore, MEMSWCTL, rgvswctl);
	intel_uncore_posting_read16(uncore, MEMSWCTL);

	rgvswctl |= MEMCTL_CMD_STS;
	intel_uncore_write16(uncore, MEMSWCTL, rgvswctl);

	return true;
}

static unsigned long intel_pxfreq(u32 vidfreq)
{
	int div = (vidfreq & 0x3f0000) >> 16;
	int post = (vidfreq & 0x3000) >> 12;
	int pre = (vidfreq & 0x7);

	if (!pre)
		return 0;

	return div * 133333 / (pre << post);
}

static unsigned int init_emon(struct intel_uncore *uncore)
{
	u8 pxw[16];
	int i;

	/* Disable to program */
	intel_uncore_write(uncore, ECR, 0);
	intel_uncore_posting_read(uncore, ECR);

	/* Program energy weights for various events */
	intel_uncore_write(uncore, SDEW, 0x15040d00);
	intel_uncore_write(uncore, CSIEW0, 0x007f0000);
	intel_uncore_write(uncore, CSIEW1, 0x1e220004);
	intel_uncore_write(uncore, CSIEW2, 0x04000004);

	for (i = 0; i < 5; i++)
		intel_uncore_write(uncore, PEW(i), 0);
	for (i = 0; i < 3; i++)
		intel_uncore_write(uncore, DEW(i), 0);

	/* Program P-state weights to account for frequency power adjustment */
	for (i = 0; i < 16; i++) {
		u32 pxvidfreq = intel_uncore_read(uncore, PXVFREQ(i));
		unsigned int freq = intel_pxfreq(pxvidfreq);
		unsigned int vid =
			(pxvidfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT;
		unsigned int val;

		val = vid * vid * freq / 1000 * 255;
		val /= 127 * 127 * 900;

		pxw[i] = val;
	}
	/* Render standby states get 0 weight */
	pxw[14] = 0;
	pxw[15] = 0;

	for (i = 0; i < 4; i++) {
		intel_uncore_write(uncore, PXW(i),
				   pxw[i * 4 + 0] << 24 |
				   pxw[i * 4 + 1] << 16 |
				   pxw[i * 4 + 2] << 8 |
				   pxw[i * 4 + 3] << 0);
	}

	/* Adjust magic regs to magic values (more experimental results) */
	intel_uncore_write(uncore, OGW0, 0);
	intel_uncore_write(uncore, OGW1, 0);
	intel_uncore_write(uncore, EG0, 0x00007f00);
	intel_uncore_write(uncore, EG1, 0x0000000e);
	intel_uncore_write(uncore, EG2, 0x000e0000);
	intel_uncore_write(uncore, EG3, 0x68000300);
	intel_uncore_write(uncore, EG4, 0x42000000);
	intel_uncore_write(uncore, EG5, 0x00140031);
	intel_uncore_write(uncore, EG6, 0);
	intel_uncore_write(uncore, EG7, 0);

	for (i = 0; i < 8; i++)
		intel_uncore_write(uncore, PXWL(i), 0);

	/* Enable PMON + select events */
	intel_uncore_write(uncore, ECR, 0x80000019);

	return intel_uncore_read(uncore, LCFUSE02) & LCFUSE_HIV_MASK;
}
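
/*
 * The fused correction value returned by init_emon() is cached in
 * ips.corr by gen5_rps_enable() and later folded into the graphics
 * power estimate in __ips_gfx_val().
 */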

static bool gen5_rps_enable(struct intel_rps *rps)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);
	u8 fstart, vstart;
	u32 rgvmodectl;

	spin_lock_irq(&mchdev_lock);

	rgvmodectl = intel_uncore_read(uncore, MEMMODECTL);

	/* Enable temp reporting */
	intel_uncore_write16(uncore, PMMISC,
			     intel_uncore_read16(uncore, PMMISC) | MCPPCE_EN);
	intel_uncore_write16(uncore, TSC1,
			     intel_uncore_read16(uncore, TSC1) | TSE);

	/* 100ms RC evaluation intervals */
	intel_uncore_write(uncore, RCUPEI, 100000);
	intel_uncore_write(uncore, RCDNEI, 100000);

	/* Set max/min thresholds to 90ms and 80ms respectively */
	intel_uncore_write(uncore, RCBMAXAVG, 90000);
	intel_uncore_write(uncore, RCBMINAVG, 80000);

	intel_uncore_write(uncore, MEMIHYST, 1);

	/* Set up min, max, and cur for interrupt handling */
	fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
		MEMMODE_FSTART_SHIFT;

	vstart = (intel_uncore_read(uncore, PXVFREQ(fstart)) &
		  PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT;

	intel_uncore_write(uncore,
			   MEMINTREN,
			   MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);

	intel_uncore_write(uncore, VIDSTART, vstart);
	intel_uncore_posting_read(uncore, VIDSTART);

	rgvmodectl |= MEMMODE_SWMODE_EN;
	intel_uncore_write(uncore, MEMMODECTL, rgvmodectl);

	if (wait_for_atomic((intel_uncore_read(uncore, MEMSWCTL) &
			     MEMCTL_CMD_STS) == 0, 10))
		drm_err(&uncore->i915->drm,
			"stuck trying to change perf mode\n");
	mdelay(1);

	gen5_rps_set(rps, rps->cur_freq);

	rps->ips.last_count1 = intel_uncore_read(uncore, DMIEC);
	rps->ips.last_count1 += intel_uncore_read(uncore, DDREC);
	rps->ips.last_count1 += intel_uncore_read(uncore, CSIEC);
	rps->ips.last_time1 = jiffies_to_msecs(jiffies);

	rps->ips.last_count2 = intel_uncore_read(uncore, GFXEC);
	rps->ips.last_time2 = ktime_get_raw_ns();

	spin_unlock_irq(&mchdev_lock);

	rps->ips.corr = init_emon(uncore);

	return true;
}

static void gen5_rps_disable(struct intel_rps *rps)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);
	u16 rgvswctl;

	spin_lock_irq(&mchdev_lock);

	rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);

	/* Ack interrupts, disable EFC interrupt */
	intel_uncore_write(uncore, MEMINTREN,
			   intel_uncore_read(uncore, MEMINTREN) &
			   ~MEMINT_EVAL_CHG_EN);
	intel_uncore_write(uncore, MEMINTRSTS, MEMINT_EVAL_CHG);
	intel_uncore_write(uncore, DEIER,
			   intel_uncore_read(uncore, DEIER) & ~DE_PCU_EVENT);
	intel_uncore_write(uncore, DEIIR, DE_PCU_EVENT);
	intel_uncore_write(uncore, DEIMR,
			   intel_uncore_read(uncore, DEIMR) | DE_PCU_EVENT);

	/* Go back to the starting frequency */
	gen5_rps_set(rps, rps->idle_freq);
	mdelay(1);
	rgvswctl |= MEMCTL_CMD_STS;
	intel_uncore_write(uncore, MEMSWCTL, rgvswctl);
	mdelay(1);

	spin_unlock_irq(&mchdev_lock);
}

static u32 rps_limits(struct intel_rps *rps, u8 val)
{
	u32 limits;

	/*
	 * Only set the down limit when we've reached the lowest level to avoid
	 * getting more interrupts, otherwise leave this clear. This prevents a
	 * race in the hw when coming out of rc6: There's a tiny window where
	 * the hw runs at the minimal clock before selecting the desired
	 * frequency, if the down threshold expires in that window we will not
	 * receive a down interrupt.
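	 *
	 * Note the max/min limit fields sit at different bit offsets in
	 * GEN6_RP_INTERRUPT_LIMITS on gen9+ (shifts 23 and 14) than on
	 * earlier gens (shifts 24 and 16), hence the split below.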
	 */
	if (INTEL_GEN(rps_to_i915(rps)) >= 9) {
		limits = rps->max_freq_softlimit << 23;
		if (val <= rps->min_freq_softlimit)
			limits |= rps->min_freq_softlimit << 14;
	} else {
		limits = rps->max_freq_softlimit << 24;
		if (val <= rps->min_freq_softlimit)
			limits |= rps->min_freq_softlimit << 16;
	}

	return limits;
}

static void rps_set_power(struct intel_rps *rps, int new_power)
{
	struct intel_gt *gt = rps_to_gt(rps);
	struct intel_uncore *uncore = gt->uncore;
	u32 threshold_up = 0, threshold_down = 0; /* in % */
	u32 ei_up = 0, ei_down = 0;

	lockdep_assert_held(&rps->power.mutex);

	if (new_power == rps->power.mode)
		return;

	threshold_up = 95;
	threshold_down = 85;

	/* Note the units here are not exactly 1us, but 1280ns. */
	switch (new_power) {
	case LOW_POWER:
		ei_up = 16000;
		ei_down = 32000;
		break;

	case BETWEEN:
		ei_up = 13000;
		ei_down = 32000;
		break;

	case HIGH_POWER:
		ei_up = 10000;
		ei_down = 32000;
		break;
	}

	/* When byt can survive without system hang with dynamic
	 * sw freq adjustments, this restriction can be lifted.
	 */
	if (IS_VALLEYVIEW(gt->i915))
		goto skip_hw_write;

	GT_TRACE(gt,
		 "changing power mode [%d], up %d%% @ %dus, down %d%% @ %dus\n",
		 new_power, threshold_up, ei_up, threshold_down, ei_down);
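
	/*
	 * The EI is programmed in ns (ei_up is in us, hence the * 1000),
	 * and the threshold as an absolute time: ei_up us * threshold_up %
	 * = ei_up * threshold_up * 10 ns. intel_gt_ns_to_pm_interval()
	 * then converts both into the ~1280ns-based units noted above.
	 */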
	set(uncore, GEN6_RP_UP_EI,
	    intel_gt_ns_to_pm_interval(gt, ei_up * 1000));
	set(uncore, GEN6_RP_UP_THRESHOLD,
	    intel_gt_ns_to_pm_interval(gt, ei_up * threshold_up * 10));

	set(uncore, GEN6_RP_DOWN_EI,
	    intel_gt_ns_to_pm_interval(gt, ei_down * 1000));
	set(uncore, GEN6_RP_DOWN_THRESHOLD,
	    intel_gt_ns_to_pm_interval(gt, ei_down * threshold_down * 10));

	set(uncore, GEN6_RP_CONTROL,
	    (INTEL_GEN(gt->i915) > 9 ? 0 : GEN6_RP_MEDIA_TURBO) |
	    GEN6_RP_MEDIA_HW_NORMAL_MODE |
	    GEN6_RP_MEDIA_IS_GFX |
	    GEN6_RP_ENABLE |
	    GEN6_RP_UP_BUSY_AVG |
	    GEN6_RP_DOWN_IDLE_AVG);

skip_hw_write:
	rps->power.mode = new_power;
	rps->power.up_threshold = threshold_up;
	rps->power.down_threshold = threshold_down;
}

static void gen6_rps_set_thresholds(struct intel_rps *rps, u8 val)
{
	int new_power;

	new_power = rps->power.mode;
	switch (rps->power.mode) {
	case LOW_POWER:
		if (val > rps->efficient_freq + 1 &&
		    val > rps->cur_freq)
			new_power = BETWEEN;
		break;

	case BETWEEN:
		if (val <= rps->efficient_freq &&
		    val < rps->cur_freq)
			new_power = LOW_POWER;
		else if (val >= rps->rp0_freq &&
			 val > rps->cur_freq)
			new_power = HIGH_POWER;
		break;

	case HIGH_POWER:
		if (val < (rps->rp1_freq + rps->rp0_freq) >> 1 &&
		    val < rps->cur_freq)
			new_power = BETWEEN;
		break;
	}
	/* Max/min bins are special */
	if (val <= rps->min_freq_softlimit)
		new_power = LOW_POWER;
	if (val >= rps->max_freq_softlimit)
		new_power = HIGH_POWER;

	mutex_lock(&rps->power.mutex);
	if (rps->power.interactive)
		new_power = HIGH_POWER;
	rps_set_power(rps, new_power);
	mutex_unlock(&rps->power.mutex);
}

void intel_rps_mark_interactive(struct intel_rps *rps, bool interactive)
{
	GT_TRACE(rps_to_gt(rps), "mark interactive: %s\n", yesno(interactive));

	mutex_lock(&rps->power.mutex);
	if (interactive) {
		if (!rps->power.interactive++ && intel_rps_is_active(rps))
			rps_set_power(rps, HIGH_POWER);
	} else {
		GEM_BUG_ON(!rps->power.interactive);
		rps->power.interactive--;
	}
	mutex_unlock(&rps->power.mutex);
}

static int gen6_rps_set(struct intel_rps *rps, u8 val)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 swreq;

	if (INTEL_GEN(i915) >= 9)
		swreq = GEN9_FREQUENCY(val);
	else if (IS_HASWELL(i915) || IS_BROADWELL(i915))
		swreq = HSW_FREQUENCY(val);
	else
		swreq = (GEN6_FREQUENCY(val) |
			 GEN6_OFFSET(0) |
			 GEN6_AGGRESSIVE_TURBO);
	set(uncore, GEN6_RPNSWREQ, swreq);

	GT_TRACE(rps_to_gt(rps), "set val:%x, freq:%d, swreq:%x\n",
		 val, intel_gpu_freq(rps, val), swreq);

	return 0;
}

static int vlv_rps_set(struct intel_rps *rps, u8 val)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	int err;

	vlv_punit_get(i915);
	err = vlv_punit_write(i915, PUNIT_REG_GPU_FREQ_REQ, val);
	vlv_punit_put(i915);

	GT_TRACE(rps_to_gt(rps), "set val:%x, freq:%d\n",
		 val, intel_gpu_freq(rps, val));

	return err;
}

static int rps_set(struct intel_rps *rps, u8 val, bool update)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	int err;

	if (INTEL_GEN(i915) < 6)
		return 0;

	if (val == rps->last_freq)
		return 0;

	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
		err = vlv_rps_set(rps, val);
	else
		err = gen6_rps_set(rps, val);
	if (err)
		return err;

	if (update)
		gen6_rps_set_thresholds(rps, val);
	rps->last_freq = val;

	return 0;
}

void intel_rps_unpark(struct intel_rps *rps)
{
	if (!intel_rps_is_enabled(rps))
		return;

	GT_TRACE(rps_to_gt(rps), "unpark:%x\n", rps->cur_freq);

	/*
	 * Use the user's desired frequency as a guide, but for better
	 * performance, jump directly to RPe as our starting frequency.
	 */
	mutex_lock(&rps->lock);

	intel_rps_set_active(rps);
	intel_rps_set(rps,
		      clamp(rps->cur_freq,
			    rps->min_freq_softlimit,
			    rps->max_freq_softlimit));

	mutex_unlock(&rps->lock);

	rps->pm_iir = 0;
	if (intel_rps_has_interrupts(rps))
		rps_enable_interrupts(rps);
	if (intel_rps_uses_timer(rps))
		rps_start_timer(rps);

	if (IS_GEN(rps_to_i915(rps), 5))
		gen5_rps_update(rps);
}

void intel_rps_park(struct intel_rps *rps)
{
	int adj;

	if (!intel_rps_clear_active(rps))
		return;

	if (intel_rps_uses_timer(rps))
		rps_stop_timer(rps);
	if (intel_rps_has_interrupts(rps))
		rps_disable_interrupts(rps);

	if (rps->last_freq <= rps->idle_freq)
		return;

	/*
	 * The punit delays the write of the frequency and voltage until it
	 * determines the GPU is awake. During normal usage we don't want to
	 * waste power changing the frequency if the GPU is sleeping (rc6).
	 * However, the GPU and driver are now idle and we do not want to delay
	 * switching to minimum voltage (reducing power whilst idle) as we do
	 * not expect to be woken in the near future and so must flush the
	 * change by waking the device.
	 *
	 * We choose to take the media powerwell (either would do to trick the
	 * punit into committing the voltage change) as that takes a lot less
	 * power than the render powerwell.
	 */
	intel_uncore_forcewake_get(rps_to_uncore(rps), FORCEWAKE_MEDIA);
	rps_set(rps, rps->idle_freq, false);
	intel_uncore_forcewake_put(rps_to_uncore(rps), FORCEWAKE_MEDIA);

	/*
	 * Since we will try and restart from the previously requested
	 * frequency on unparking, treat this idle point as a downclock
	 * interrupt and reduce the frequency for resume. If we park/unpark
	 * more frequently than the rps worker can run, we will not respond
	 * to any EI and never see a change in frequency.
	 *
	 * (Note we accommodate Cherryview's limitation of only using an
	 * even bin by applying it to all.)
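	 *
	 * For example, if the previous downward adjustment was -2, parking
	 * doubles it to -4; if we were last adjusting upwards, or not at
	 * all, we restart from -2 (keeping the bin even for Cherryview).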
	 */
	adj = rps->last_adj;
	if (adj < 0)
		adj *= 2;
	else /* CHV needs even encode values */
		adj = -2;
	rps->last_adj = adj;
	rps->cur_freq = max_t(int, rps->cur_freq + adj, rps->min_freq);

	GT_TRACE(rps_to_gt(rps), "park:%x\n", rps->cur_freq);
}

void intel_rps_boost(struct i915_request *rq)
{
	struct intel_rps *rps = &READ_ONCE(rq->engine)->gt->rps;
	unsigned long flags;

	if (i915_request_signaled(rq) || !intel_rps_is_active(rps))
		return;

	/* Serializes with i915_request_retire() */
	spin_lock_irqsave(&rq->lock, flags);
	if (!i915_request_has_waitboost(rq) &&
	    !dma_fence_is_signaled_locked(&rq->fence)) {
		set_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags);

		GT_TRACE(rps_to_gt(rps), "boost fence:%llx:%llx\n",
			 rq->fence.context, rq->fence.seqno);

		if (!atomic_fetch_inc(&rps->num_waiters) &&
		    READ_ONCE(rps->cur_freq) < rps->boost_freq)
			schedule_work(&rps->work);

		atomic_inc(&rps->boosts);
	}
	spin_unlock_irqrestore(&rq->lock, flags);
}

int intel_rps_set(struct intel_rps *rps, u8 val)
{
	int err;

	lockdep_assert_held(&rps->lock);
	GEM_BUG_ON(val > rps->max_freq);
	GEM_BUG_ON(val < rps->min_freq);

	if (intel_rps_is_active(rps)) {
		err = rps_set(rps, val, true);
		if (err)
			return err;

		/*
		 * Make sure we continue to get interrupts
		 * until we hit the minimum or maximum frequencies.
		 */
		if (intel_rps_has_interrupts(rps)) {
			struct intel_uncore *uncore = rps_to_uncore(rps);

			set(uncore,
			    GEN6_RP_INTERRUPT_LIMITS, rps_limits(rps, val));

			set(uncore, GEN6_PMINTRMSK, rps_pm_mask(rps, val));
		}
	}

	rps->cur_freq = val;
	return 0;
}

static void gen6_rps_init(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	struct intel_uncore *uncore = rps_to_uncore(rps);

	/* All of these values are in units of 50MHz */

	/* static values from HW: RP0 > RP1 > RPn (min_freq) */
	if (IS_GEN9_LP(i915)) {
		u32 rp_state_cap = intel_uncore_read(uncore, BXT_RP_STATE_CAP);

		rps->rp0_freq = (rp_state_cap >> 16) & 0xff;
		rps->rp1_freq = (rp_state_cap >> 8) & 0xff;
		rps->min_freq = (rp_state_cap >> 0) & 0xff;
	} else {
		u32 rp_state_cap = intel_uncore_read(uncore, GEN6_RP_STATE_CAP);

		rps->rp0_freq = (rp_state_cap >> 0) & 0xff;
		rps->rp1_freq = (rp_state_cap >> 8) & 0xff;
		rps->min_freq = (rp_state_cap >> 16) & 0xff;
	}

	/* hw_max = RP0 until we check for overclocking */
	rps->max_freq = rps->rp0_freq;

	rps->efficient_freq = rps->rp1_freq;
	if (IS_HASWELL(i915) || IS_BROADWELL(i915) ||
	    IS_GEN9_BC(i915) || INTEL_GEN(i915) >= 10) {
		u32 ddcc_status = 0;

		if (sandybridge_pcode_read(i915,
					   HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
					   &ddcc_status, NULL) == 0)
			rps->efficient_freq =
				clamp_t(u8,
					(ddcc_status >> 8) & 0xff,
					rps->min_freq,
					rps->max_freq);
	}

	if (IS_GEN9_BC(i915) || INTEL_GEN(i915) >= 10) {
		/* Store the frequency values in 16.66 MHZ units, which is
		 * the natural hardware unit for SKL
		 */
		rps->rp0_freq *= GEN9_FREQ_SCALER;
		rps->rp1_freq *= GEN9_FREQ_SCALER;
		rps->min_freq *= GEN9_FREQ_SCALER;
		rps->max_freq *= GEN9_FREQ_SCALER;
		rps->efficient_freq *= GEN9_FREQ_SCALER;
	}
}

static bool rps_reset(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);

	/* force a reset */
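	/*
	 * -1 is outside any valid power mode or frequency, so the next
	 * rps_set()/rps_set_power() cannot mistake the new request for
	 * a no-op and return early.
	 */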
	rps->power.mode = -1;
	rps->last_freq = -1;

	if (rps_set(rps, rps->min_freq, true)) {
		drm_err(&i915->drm, "Failed to reset RPS to initial values\n");
		return false;
	}

	rps->cur_freq = rps->min_freq;
	return true;
}

/* See the Gen9_GT_PM_Programming_Guide doc for the below */
static bool gen9_rps_enable(struct intel_rps *rps)
{
	struct intel_gt *gt = rps_to_gt(rps);
	struct intel_uncore *uncore = gt->uncore;

	/* Program defaults and thresholds for RPS */
	if (IS_GEN(gt->i915, 9))
		intel_uncore_write_fw(uncore, GEN6_RC_VIDEO_FREQ,
				      GEN9_FREQUENCY(rps->rp1_freq));

	intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 0xa);

	rps->pm_events = GEN6_PM_RP_UP_THRESHOLD | GEN6_PM_RP_DOWN_THRESHOLD;

	return rps_reset(rps);
}

static bool gen8_rps_enable(struct intel_rps *rps)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);

	intel_uncore_write_fw(uncore, GEN6_RC_VIDEO_FREQ,
			      HSW_FREQUENCY(rps->rp1_freq));

	intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10);

	rps->pm_events = GEN6_PM_RP_UP_THRESHOLD | GEN6_PM_RP_DOWN_THRESHOLD;

	return rps_reset(rps);
}

static bool gen6_rps_enable(struct intel_rps *rps)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);

	/* Power down if completely idle for over 50ms */
	intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, 50000);
	intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10);

	rps->pm_events = (GEN6_PM_RP_UP_THRESHOLD |
			  GEN6_PM_RP_DOWN_THRESHOLD |
			  GEN6_PM_RP_DOWN_TIMEOUT);

	return rps_reset(rps);
}

static int chv_rps_max_freq(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val;

	val = vlv_punit_read(i915, FB_GFX_FMAX_AT_VMAX_FUSE);

	switch (RUNTIME_INFO(i915)->sseu.eu_total) {
	case 8:
		/* (2 * 4) config */
		val >>= FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT;
		break;
	case 12:
		/* (2 * 6) config */
		val >>= FB_GFX_FMAX_AT_VMAX_2SS6EU_FUSE_SHIFT;
		break;
	case 16:
		/* (2 * 8) config */
	default:
		/* Setting (2 * 8) Min RP0 for any other combination */
		val >>= FB_GFX_FMAX_AT_VMAX_2SS8EU_FUSE_SHIFT;
		break;
	}

	return val & FB_GFX_FREQ_FUSE_MASK;
}

static int chv_rps_rpe_freq(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val;

	val = vlv_punit_read(i915, PUNIT_GPU_DUTYCYCLE_REG);
	val >>= PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT;

	return val & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK;
}

static int chv_rps_guar_freq(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val;

	val = vlv_punit_read(i915, FB_GFX_FMAX_AT_VMAX_FUSE);

	return val & FB_GFX_FREQ_FUSE_MASK;
}

static u32 chv_rps_min_freq(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val;

	val = vlv_punit_read(i915, FB_GFX_FMIN_AT_VMIN_FUSE);
	val >>= FB_GFX_FMIN_AT_VMIN_FUSE_SHIFT;

	return val & FB_GFX_FREQ_FUSE_MASK;
}

static bool chv_rps_enable(struct intel_rps *rps)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val;

	/* 1: Program defaults and thresholds for RPS */
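	/*
	 * These threshold/EI values are presumably in the same ~1280ns
	 * units as noted for the GEN6_RP registers in rps_set_power().
	 */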
	intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, 1000000);
	intel_uncore_write_fw(uncore, GEN6_RP_UP_THRESHOLD, 59400);
	intel_uncore_write_fw(uncore, GEN6_RP_DOWN_THRESHOLD, 245000);
	intel_uncore_write_fw(uncore, GEN6_RP_UP_EI, 66000);
	intel_uncore_write_fw(uncore, GEN6_RP_DOWN_EI, 350000);

	intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10);

	/* 2: Enable RPS */
	intel_uncore_write_fw(uncore, GEN6_RP_CONTROL,
			      GEN6_RP_MEDIA_HW_NORMAL_MODE |
			      GEN6_RP_MEDIA_IS_GFX |
			      GEN6_RP_ENABLE |
			      GEN6_RP_UP_BUSY_AVG |
			      GEN6_RP_DOWN_IDLE_AVG);

	rps->pm_events = (GEN6_PM_RP_UP_THRESHOLD |
			  GEN6_PM_RP_DOWN_THRESHOLD |
			  GEN6_PM_RP_DOWN_TIMEOUT);

	/* Setting Fixed Bias */
	vlv_punit_get(i915);

	val = VLV_OVERRIDE_EN | VLV_SOC_TDP_EN | CHV_BIAS_CPU_50_SOC_50;
	vlv_punit_write(i915, VLV_TURBO_SOC_OVERRIDE, val);

	val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);

	vlv_punit_put(i915);

	/* RPS code assumes GPLL is used */
	drm_WARN_ONCE(&i915->drm, (val & GPLLENABLE) == 0,
		      "GPLL not enabled\n");

	drm_dbg(&i915->drm, "GPLL enabled? %s\n", yesno(val & GPLLENABLE));
	drm_dbg(&i915->drm, "GPU status: 0x%08x\n", val);

	return rps_reset(rps);
}

static int vlv_rps_guar_freq(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val, rp1;

	val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FREQ_FUSE);

	rp1 = val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK;
	rp1 >>= FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT;

	return rp1;
}

static int vlv_rps_max_freq(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val, rp0;

	val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FREQ_FUSE);

	rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
	/* Clamp to max */
	rp0 = min_t(u32, rp0, 0xea);

	return rp0;
}

static int vlv_rps_rpe_freq(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val, rpe;

	val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
	rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
	val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
	rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;

	return rpe;
}

static int vlv_rps_min_freq(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val;

	val = vlv_punit_read(i915, PUNIT_REG_GPU_LFM) & 0xff;
	/*
	 * According to the BYT Punit GPU turbo HAS 1.1.6.3 the minimum value
	 * for the minimum frequency in GPLL mode is 0xc1. Contrary to this on
	 * a BYT-M B0 the above register contains 0xbf. Moreover when setting
	 * a frequency Punit will not allow values below 0xc0. Clamp it to
	 * 0xc0 to make sure it matches what Punit accepts.
	 */
	return max_t(u32, val, 0xc0);
}

static bool vlv_rps_enable(struct intel_rps *rps)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val;

	intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, 1000000);
	intel_uncore_write_fw(uncore, GEN6_RP_UP_THRESHOLD, 59400);
	intel_uncore_write_fw(uncore, GEN6_RP_DOWN_THRESHOLD, 245000);
	intel_uncore_write_fw(uncore, GEN6_RP_UP_EI, 66000);
	intel_uncore_write_fw(uncore, GEN6_RP_DOWN_EI, 350000);

	intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10);

	intel_uncore_write_fw(uncore, GEN6_RP_CONTROL,
			      GEN6_RP_MEDIA_TURBO |
			      GEN6_RP_MEDIA_HW_NORMAL_MODE |
			      GEN6_RP_MEDIA_IS_GFX |
			      GEN6_RP_ENABLE |
			      GEN6_RP_UP_BUSY_AVG |
			      GEN6_RP_DOWN_IDLE_CONT);

	/* WaGsvRC0ResidencyMethod:vlv */
	rps->pm_events = GEN6_PM_RP_UP_EI_EXPIRED;

	vlv_punit_get(i915);

	/* Setting Fixed Bias */
	val = VLV_OVERRIDE_EN | VLV_SOC_TDP_EN | VLV_BIAS_CPU_125_SOC_875;
	vlv_punit_write(i915, VLV_TURBO_SOC_OVERRIDE, val);

	val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);

	vlv_punit_put(i915);

	/* RPS code assumes GPLL is used */
	drm_WARN_ONCE(&i915->drm, (val & GPLLENABLE) == 0,
		      "GPLL not enabled\n");

	drm_dbg(&i915->drm, "GPLL enabled? %s\n", yesno(val & GPLLENABLE));
	drm_dbg(&i915->drm, "GPU status: 0x%08x\n", val);

	return rps_reset(rps);
}

static unsigned long __ips_gfx_val(struct intel_ips *ips)
{
	struct intel_rps *rps = container_of(ips, typeof(*rps), ips);
	struct intel_uncore *uncore = rps_to_uncore(rps);
	unsigned long t, corr, state1, corr2, state2;
	u32 pxvid, ext_v;

	lockdep_assert_held(&mchdev_lock);

	pxvid = intel_uncore_read(uncore, PXVFREQ(rps->cur_freq));
	pxvid = (pxvid >> 24) & 0x7f;
	ext_v = pvid_to_extvid(rps_to_i915(rps), pxvid);

	state1 = ext_v;

	/* Revel in the empirically derived constants */

	/* Correction factor in 1/100000 units */
	t = ips_mch_val(uncore);
	if (t > 80)
		corr = t * 2349 + 135940;
	else if (t >= 50)
		corr = t * 964 + 29317;
	else /* < 50 */
		corr = t * 301 + 1004;

	corr = corr * 150142 * state1 / 10000 - 78642;
	corr /= 100000;
	corr2 = corr * ips->corr;

	state2 = corr2 * state1 / 10000;
	state2 /= 100; /* convert to mW */

	__gen5_ips_update(ips);

	return ips->gfx_power + state2;
}

static bool has_busy_stats(struct intel_rps *rps)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, rps_to_gt(rps), id) {
		if (!intel_engine_supports_stats(engine))
			return false;
	}

	return true;
}
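
/*
 * If every engine reports busy-time stats, we drive RPS from the soft
 * timer (rps_timer) rather than the hardware up/down interrupts; see
 * the intel_rps_set_timer() vs intel_rps_set_interrupts() choice in
 * intel_rps_enable() below.
 */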

void intel_rps_enable(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	struct intel_uncore *uncore = rps_to_uncore(rps);
	bool enabled = false;

	if (!HAS_RPS(i915))
		return;

	intel_gt_check_clock_frequency(rps_to_gt(rps));

	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
	if (rps->max_freq <= rps->min_freq)
		/* leave disabled, no room for dynamic reclocking */;
	else if (IS_CHERRYVIEW(i915))
		enabled = chv_rps_enable(rps);
	else if (IS_VALLEYVIEW(i915))
		enabled = vlv_rps_enable(rps);
	else if (INTEL_GEN(i915) >= 9)
		enabled = gen9_rps_enable(rps);
	else if (INTEL_GEN(i915) >= 8)
		enabled = gen8_rps_enable(rps);
	else if (INTEL_GEN(i915) >= 6)
		enabled = gen6_rps_enable(rps);
	else if (IS_IRONLAKE_M(i915))
		enabled = gen5_rps_enable(rps);
	else
		MISSING_CASE(INTEL_GEN(i915));
	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
	if (!enabled)
		return;

	GT_TRACE(rps_to_gt(rps),
		 "min:%x, max:%x, freq:[%d, %d]\n",
		 rps->min_freq, rps->max_freq,
		 intel_gpu_freq(rps, rps->min_freq),
		 intel_gpu_freq(rps, rps->max_freq));

	GEM_BUG_ON(rps->max_freq < rps->min_freq);
	GEM_BUG_ON(rps->idle_freq > rps->max_freq);

	GEM_BUG_ON(rps->efficient_freq < rps->min_freq);
	GEM_BUG_ON(rps->efficient_freq > rps->max_freq);

	if (has_busy_stats(rps))
		intel_rps_set_timer(rps);
	else if (INTEL_GEN(i915) >= 6)
		intel_rps_set_interrupts(rps);
	else
		/* Ironlake currently uses intel_ips.ko */ {}

	intel_rps_set_enabled(rps);
}

static void gen6_rps_disable(struct intel_rps *rps)
{
	set(rps_to_uncore(rps), GEN6_RP_CONTROL, 0);
}

void intel_rps_disable(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);

	intel_rps_clear_enabled(rps);
	intel_rps_clear_interrupts(rps);
	intel_rps_clear_timer(rps);

	if (INTEL_GEN(i915) >= 6)
		gen6_rps_disable(rps);
	else if (IS_IRONLAKE_M(i915))
		gen5_rps_disable(rps);
}

static int byt_gpu_freq(struct intel_rps *rps, int val)
{
	/*
	 * N = val - 0xb7
	 * Slow = Fast = GPLL ref * N
	 */
	return DIV_ROUND_CLOSEST(rps->gpll_ref_freq * (val - 0xb7), 1000);
}

static int byt_freq_opcode(struct intel_rps *rps, int val)
{
	return DIV_ROUND_CLOSEST(1000 * val, rps->gpll_ref_freq) + 0xb7;
}

static int chv_gpu_freq(struct intel_rps *rps, int val)
{
	/*
	 * N = val / 2
	 * CU (slow) = CU2x (fast) / 2 = GPLL ref * N / 2
	 */
	return DIV_ROUND_CLOSEST(rps->gpll_ref_freq * val, 2 * 2 * 1000);
}

static int chv_freq_opcode(struct intel_rps *rps, int val)
{
	/* CHV needs even values */
	return DIV_ROUND_CLOSEST(2 * 1000 * val, rps->gpll_ref_freq) * 2;
}

int intel_gpu_freq(struct intel_rps *rps, int val)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);

	if (INTEL_GEN(i915) >= 9)
		return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER,
					 GEN9_FREQ_SCALER);
	else if (IS_CHERRYVIEW(i915))
		return chv_gpu_freq(rps, val);
	else if (IS_VALLEYVIEW(i915))
		return byt_gpu_freq(rps, val);
	else
		return val * GT_FREQUENCY_MULTIPLIER;
}

int intel_freq_opcode(struct intel_rps *rps, int val)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);

	if (INTEL_GEN(i915) >= 9)
		return DIV_ROUND_CLOSEST(val * GEN9_FREQ_SCALER,
					 GT_FREQUENCY_MULTIPLIER);
	else if (IS_CHERRYVIEW(i915))
		return chv_freq_opcode(rps, val);
	else if (IS_VALLEYVIEW(i915))
		return byt_freq_opcode(rps, val);
	else
		return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER);
}

static void vlv_init_gpll_ref_freq(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);

	rps->gpll_ref_freq =
		vlv_get_cck_clock(i915, "GPLL ref",
				  CCK_GPLL_CLOCK_CONTROL,
				  i915->czclk_freq);

	drm_dbg(&i915->drm, "GPLL reference freq: %d kHz\n",
		rps->gpll_ref_freq);
}

static void vlv_rps_init(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val;

	vlv_iosf_sb_get(i915,
			BIT(VLV_IOSF_SB_PUNIT) |
			BIT(VLV_IOSF_SB_NC) |
			BIT(VLV_IOSF_SB_CCK));

	vlv_init_gpll_ref_freq(rps);

	val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
	switch ((val >> 6) & 3) {
	case 0:
	case 1:
		i915->mem_freq = 800;
		break;
	case 2:
		i915->mem_freq = 1066;
		break;
	case 3:
		i915->mem_freq = 1333;
		break;
	}
	drm_dbg(&i915->drm, "DDR speed: %d MHz\n", i915->mem_freq);

	rps->max_freq = vlv_rps_max_freq(rps);
	rps->rp0_freq = rps->max_freq;
	drm_dbg(&i915->drm, "max GPU freq: %d MHz (%u)\n",
		intel_gpu_freq(rps, rps->max_freq), rps->max_freq);

	rps->efficient_freq = vlv_rps_rpe_freq(rps);
	drm_dbg(&i915->drm, "RPe GPU freq: %d MHz (%u)\n",
		intel_gpu_freq(rps, rps->efficient_freq), rps->efficient_freq);

	rps->rp1_freq = vlv_rps_guar_freq(rps);
	drm_dbg(&i915->drm, "RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
		intel_gpu_freq(rps, rps->rp1_freq), rps->rp1_freq);

	rps->min_freq = vlv_rps_min_freq(rps);
	drm_dbg(&i915->drm, "min GPU freq: %d MHz (%u)\n",
		intel_gpu_freq(rps, rps->min_freq), rps->min_freq);

	vlv_iosf_sb_put(i915,
			BIT(VLV_IOSF_SB_PUNIT) |
			BIT(VLV_IOSF_SB_NC) |
			BIT(VLV_IOSF_SB_CCK));
}

static void chv_rps_init(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val;

	vlv_iosf_sb_get(i915,
			BIT(VLV_IOSF_SB_PUNIT) |
			BIT(VLV_IOSF_SB_NC) |
			BIT(VLV_IOSF_SB_CCK));

	vlv_init_gpll_ref_freq(rps);

	val = vlv_cck_read(i915, CCK_FUSE_REG);

	switch ((val >> 2) & 0x7) {
	case 3:
		i915->mem_freq = 2000;
		break;
	default:
		i915->mem_freq = 1600;
		break;
	}
	drm_dbg(&i915->drm, "DDR speed: %d MHz\n", i915->mem_freq);

	rps->max_freq = chv_rps_max_freq(rps);
	rps->rp0_freq = rps->max_freq;
	drm_dbg(&i915->drm, "max GPU freq: %d MHz (%u)\n",
		intel_gpu_freq(rps, rps->max_freq), rps->max_freq);

	rps->efficient_freq = chv_rps_rpe_freq(rps);
	drm_dbg(&i915->drm, "RPe GPU freq: %d MHz (%u)\n",
		intel_gpu_freq(rps, rps->efficient_freq), rps->efficient_freq);

	rps->rp1_freq = chv_rps_guar_freq(rps);
	drm_dbg(&i915->drm, "RP1(Guar) GPU freq: %d MHz (%u)\n",
		intel_gpu_freq(rps, rps->rp1_freq), rps->rp1_freq);

	rps->min_freq = chv_rps_min_freq(rps);
	drm_dbg(&i915->drm, "min GPU freq: %d MHz (%u)\n",
		intel_gpu_freq(rps, rps->min_freq), rps->min_freq);

	vlv_iosf_sb_put(i915,
			BIT(VLV_IOSF_SB_PUNIT) |
			BIT(VLV_IOSF_SB_NC) |
			BIT(VLV_IOSF_SB_CCK));

	drm_WARN_ONCE(&i915->drm, (rps->max_freq | rps->efficient_freq |
				   rps->rp1_freq | rps->min_freq) & 1,
		      "Odd GPU freq values\n");
}

static void vlv_c0_read(struct intel_uncore *uncore, struct intel_rps_ei *ei)
{
	ei->ktime = ktime_get_raw();
	ei->render_c0 = intel_uncore_read(uncore, VLV_RENDER_C0_COUNT);
	ei->media_c0 = intel_uncore_read(uncore, VLV_MEDIA_C0_COUNT);
}

static u32 vlv_wa_c0_ei(struct intel_rps *rps, u32 pm_iir)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);
	const struct intel_rps_ei *prev = &rps->ei;
	struct intel_rps_ei now;
	u32 events = 0;

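	/*
	 * WaGsvRC0ResidencyMethod:vlv - instead of the hardware up/down
	 * threshold interrupts, we sample the render/media C0 residency
	 * counters on each EI-expired interrupt and apply the up/down
	 * thresholds ourselves below.
	 */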
	if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0)
		return 0;

	vlv_c0_read(uncore, &now);

	if (prev->ktime) {
		u64 time, c0;
		u32 render, media;

		time = ktime_us_delta(now.ktime, prev->ktime);

		time *= rps_to_i915(rps)->czclk_freq;

		/* Workload can be split between render + media,
		 * e.g. SwapBuffers being blitted in X after being rendered in
		 * mesa. To account for this we need to combine both engines
		 * into our activity counter.
		 */
		render = now.render_c0 - prev->render_c0;
		media = now.media_c0 - prev->media_c0;
		c0 = max(render, media);
		c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */

		if (c0 > time * rps->power.up_threshold)
			events = GEN6_PM_RP_UP_THRESHOLD;
		else if (c0 < time * rps->power.down_threshold)
			events = GEN6_PM_RP_DOWN_THRESHOLD;
	}

	rps->ei = now;
	return events;
}

static void rps_work(struct work_struct *work)
{
	struct intel_rps *rps = container_of(work, typeof(*rps), work);
	struct intel_gt *gt = rps_to_gt(rps);
	struct drm_i915_private *i915 = rps_to_i915(rps);
	bool client_boost = false;
	int new_freq, adj, min, max;
	u32 pm_iir = 0;

	spin_lock_irq(&gt->irq_lock);
	pm_iir = fetch_and_zero(&rps->pm_iir) & rps->pm_events;
	client_boost = atomic_read(&rps->num_waiters);
	spin_unlock_irq(&gt->irq_lock);

	/* Make sure we didn't queue anything we're not going to process. */
	if (!pm_iir && !client_boost)
		goto out;

	mutex_lock(&rps->lock);
	if (!intel_rps_is_active(rps)) {
		mutex_unlock(&rps->lock);
		return;
	}

	pm_iir |= vlv_wa_c0_ei(rps, pm_iir);

	adj = rps->last_adj;
	new_freq = rps->cur_freq;
	min = rps->min_freq_softlimit;
	max = rps->max_freq_softlimit;
	if (client_boost)
		max = rps->max_freq;

	GT_TRACE(gt,
		 "pm_iir:%x, client_boost:%s, last:%d, cur:%x, min:%x, max:%x\n",
		 pm_iir, yesno(client_boost),
		 adj, new_freq, min, max);

	if (client_boost && new_freq < rps->boost_freq) {
		new_freq = rps->boost_freq;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(gt->i915) ? 2 : 1;

		if (new_freq >= rps->max_freq_softlimit)
			adj = 0;
	} else if (client_boost) {
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (rps->cur_freq > rps->efficient_freq)
			new_freq = rps->efficient_freq;
		else if (rps->cur_freq > rps->min_freq_softlimit)
			new_freq = rps->min_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(gt->i915) ? -2 : -1;

		if (new_freq <= rps->min_freq_softlimit)
			adj = 0;
	} else { /* unknown event */
		adj = 0;
	}

	/*
	 * sysfs frequency limits may have snuck in while
	 * servicing the interrupt
	 */
	new_freq += adj;
	new_freq = clamp_t(int, new_freq, min, max);

	if (intel_rps_set(rps, new_freq)) {
		drm_dbg(&i915->drm, "Failed to set new GPU frequency\n");
		adj = 0;
	}
	rps->last_adj = adj;

	mutex_unlock(&rps->lock);

out:
	spin_lock_irq(&gt->irq_lock);
	gen6_gt_pm_unmask_irq(gt, rps->pm_events);
	spin_unlock_irq(&gt->irq_lock);
}

void gen11_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
{
	struct intel_gt *gt = rps_to_gt(rps);
	const u32 events = rps->pm_events & pm_iir;

	lockdep_assert_held(&gt->irq_lock);

	if (unlikely(!events))
		return;

	GT_TRACE(gt, "irq events:%x\n", events);

	gen6_gt_pm_mask_irq(gt, events);

	rps->pm_iir |= events;
	schedule_work(&rps->work);
}

void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
{
	struct intel_gt *gt = rps_to_gt(rps);
	u32 events;

	events = pm_iir & rps->pm_events;
	if (events) {
		spin_lock(&gt->irq_lock);

		GT_TRACE(gt, "irq events:%x\n", events);

		gen6_gt_pm_mask_irq(gt, events);
		rps->pm_iir |= events;

		schedule_work(&rps->work);
		spin_unlock(&gt->irq_lock);
	}

	if (INTEL_GEN(gt->i915) >= 8)
		return;

	if (pm_iir & PM_VEBOX_USER_INTERRUPT)
		intel_engine_signal_breadcrumbs(gt->engine[VECS0]);

	if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
		DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
}

void gen5_rps_irq_handler(struct intel_rps *rps)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_freq;

	spin_lock(&mchdev_lock);

	intel_uncore_write16(uncore,
			     MEMINTRSTS,
			     intel_uncore_read(uncore, MEMINTRSTS));

	intel_uncore_write16(uncore, MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = intel_uncore_read(uncore, RCPREVBSYTUPAVG);
	busy_down = intel_uncore_read(uncore, RCPREVBSYTDNAVG);
	max_avg = intel_uncore_read(uncore, RCBMAXAVG);
	min_avg = intel_uncore_read(uncore, RCBMINAVG);

	/* Handle RCS change request from hw */
	new_freq = rps->cur_freq;
	if (busy_up > max_avg)
		new_freq++;
	else if (busy_down < min_avg)
		new_freq--;
	new_freq = clamp(new_freq,
			 rps->min_freq_softlimit,
			 rps->max_freq_softlimit);

	if (new_freq != rps->cur_freq && gen5_rps_set(rps, new_freq))
		rps->cur_freq = new_freq;

	spin_unlock(&mchdev_lock);
}

void intel_rps_init_early(struct intel_rps *rps)
{
	mutex_init(&rps->lock);
	mutex_init(&rps->power.mutex);

	INIT_WORK(&rps->work, rps_work);
	timer_setup(&rps->timer, rps_timer, 0);

	atomic_set(&rps->num_waiters, 0);
}

void intel_rps_init(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);

	if (IS_CHERRYVIEW(i915))
		chv_rps_init(rps);
	else if (IS_VALLEYVIEW(i915))
		vlv_rps_init(rps);
	else if (INTEL_GEN(i915) >= 6)
		gen6_rps_init(rps);
	else if (IS_IRONLAKE_M(i915))
		gen5_rps_init(rps);

	/* Derive initial user preferences/limits from the hardware limits */
	rps->max_freq_softlimit = rps->max_freq;
	rps->min_freq_softlimit = rps->min_freq;

	/* After setting max-softlimit, find the overclock max freq */
	if (IS_GEN(i915, 6) || IS_IVYBRIDGE(i915) || IS_HASWELL(i915)) {
		u32 params = 0;

		sandybridge_pcode_read(i915, GEN6_READ_OC_PARAMS,
				       &params, NULL);
		if (params & BIT(31)) { /* OC supported */
			drm_dbg(&i915->drm,
				"Overclocking supported, max: %dMHz, overclock: %dMHz\n",
				(rps->max_freq & 0xff) * 50,
				(params & 0xff) * 50);
			rps->max_freq = params & 0xff;
		}
	}

	/* Finally allow us to boost to max by default */
	rps->boost_freq = rps->max_freq;
	rps->idle_freq = rps->min_freq;

	/* Start in the middle, from here we will autotune based on workload */
	rps->cur_freq = rps->efficient_freq;

	rps->pm_intrmsk_mbz = 0;

	/*
	 * SNB,IVB,HSW can hang, while VLV,CHV may hard hang, on a looping
	 * batchbuffer if GEN6_PM_UP_EI_EXPIRED is masked.
	 *
	 * TODO: verify if this can be reproduced on VLV,CHV.
	 */
	if (INTEL_GEN(i915) <= 7)
		rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;

	if (INTEL_GEN(i915) >= 8 && INTEL_GEN(i915) < 11)
		rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
}

void intel_rps_sanitize(struct intel_rps *rps)
{
	if (INTEL_GEN(rps_to_i915(rps)) >= 6)
		rps_disable_interrupts(rps);
}

u32 intel_rps_get_cagf(struct intel_rps *rps, u32 rpstat)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 cagf;

	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
		cagf = (rpstat >> 8) & 0xff;
	else if (INTEL_GEN(i915) >= 9)
		cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
	else if (IS_HASWELL(i915) || IS_BROADWELL(i915))
		cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
	else
		cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;

	return cagf;
}

static u32 read_cagf(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 freq;

	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
		vlv_punit_get(i915);
		freq = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
		vlv_punit_put(i915);
	} else {
		freq = intel_uncore_read(rps_to_uncore(rps), GEN6_RPSTAT1);
	}

	return intel_rps_get_cagf(rps, freq);
}

u32 intel_rps_read_actual_frequency(struct intel_rps *rps)
{
	struct intel_runtime_pm *rpm = rps_to_uncore(rps)->rpm;
	intel_wakeref_t wakeref;
	u32 freq = 0;

	with_intel_runtime_pm_if_in_use(rpm, wakeref)
		freq = intel_gpu_freq(rps, read_cagf(rps));

	return freq;
}

/* External interface for intel_ips.ko */

static struct drm_i915_private __rcu *ips_mchdev;

/**
 * Tells the intel_ips driver that the i915 driver is now loaded, if
 * IPS got loaded first.
 *
 * This awkward dance is so that neither module has to depend on the
 * other in order for IPS to do the appropriate communication of
 * GPU turbo limits to i915.
 */
static void
ips_ping_for_i915_load(void)
{
	void (*link)(void);

	link = symbol_get(ips_link_to_i915_driver);
	if (link) {
		link();
		symbol_put(ips_link_to_i915_driver);
	}
}

void intel_rps_driver_register(struct intel_rps *rps)
{
	struct intel_gt *gt = rps_to_gt(rps);

	/*
	 * We only register the i915 ips part with intel-ips once everything is
	 * set up, to avoid intel-ips sneaking in and reading bogus values.
	 */
	if (IS_GEN(gt->i915, 5)) {
		GEM_BUG_ON(ips_mchdev);
		rcu_assign_pointer(ips_mchdev, gt->i915);
		ips_ping_for_i915_load();
	}
}

void intel_rps_driver_unregister(struct intel_rps *rps)
{
	if (rcu_access_pointer(ips_mchdev) == rps_to_i915(rps))
		rcu_assign_pointer(ips_mchdev, NULL);
}

static struct drm_i915_private *mchdev_get(void)
{
	struct drm_i915_private *i915;

	rcu_read_lock();
	i915 = rcu_dereference(ips_mchdev);
	if (i915 && !kref_get_unless_zero(&i915->drm.ref))
		i915 = NULL;
	rcu_read_unlock();

	return i915;
}

/**
 * i915_read_mch_val - return value for IPS use
 *
 * Calculate and return a value for the IPS driver to use when deciding whether
 * we have thermal and power headroom to increase CPU or GPU power budget.
 */
unsigned long i915_read_mch_val(void)
{
	struct drm_i915_private *i915;
	unsigned long chipset_val = 0;
	unsigned long graphics_val = 0;
	intel_wakeref_t wakeref;

	i915 = mchdev_get();
	if (!i915)
		return 0;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		struct intel_ips *ips = &i915->gt.rps.ips;

		spin_lock_irq(&mchdev_lock);
		chipset_val = __ips_chipset_val(ips);
		graphics_val = __ips_gfx_val(ips);
		spin_unlock_irq(&mchdev_lock);
	}

	drm_dev_put(&i915->drm);
	return chipset_val + graphics_val;
}
EXPORT_SYMBOL_GPL(i915_read_mch_val);

/**
 * i915_gpu_raise - raise GPU frequency limit
 *
 * Raise the limit; IPS indicates we have thermal headroom.
 */
bool i915_gpu_raise(void)
{
	struct drm_i915_private *i915;
	struct intel_rps *rps;

	i915 = mchdev_get();
	if (!i915)
		return false;

	rps = &i915->gt.rps;

	spin_lock_irq(&mchdev_lock);
	if (rps->max_freq_softlimit < rps->max_freq)
		rps->max_freq_softlimit++;
	spin_unlock_irq(&mchdev_lock);

	drm_dev_put(&i915->drm);
	return true;
}
EXPORT_SYMBOL_GPL(i915_gpu_raise);

/**
 * i915_gpu_lower - lower GPU frequency limit
 *
 * IPS indicates we're close to a thermal limit, so throttle back the GPU
 * frequency maximum.
 */
bool i915_gpu_lower(void)
{
	struct drm_i915_private *i915;
	struct intel_rps *rps;

	i915 = mchdev_get();
	if (!i915)
		return false;

	rps = &i915->gt.rps;

	spin_lock_irq(&mchdev_lock);
	if (rps->max_freq_softlimit > rps->min_freq)
		rps->max_freq_softlimit--;
	spin_unlock_irq(&mchdev_lock);

	drm_dev_put(&i915->drm);
	return true;
}
EXPORT_SYMBOL_GPL(i915_gpu_lower);

/**
 * i915_gpu_busy - indicate GPU business to IPS
 *
 * Tell the IPS driver whether or not the GPU is busy.
 */
bool i915_gpu_busy(void)
{
	struct drm_i915_private *i915;
	bool ret;

	i915 = mchdev_get();
	if (!i915)
		return false;

	ret = i915->gt.awake;

	drm_dev_put(&i915->drm);
	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_busy);

/**
 * i915_gpu_turbo_disable - disable graphics turbo
 *
 * Disable graphics turbo by resetting the max frequency and setting the
 * current frequency to the default.
 */
bool i915_gpu_turbo_disable(void)
{
	struct drm_i915_private *i915;
	struct intel_rps *rps;
	bool ret;

	i915 = mchdev_get();
	if (!i915)
		return false;

	rps = &i915->gt.rps;

	spin_lock_irq(&mchdev_lock);
	rps->max_freq_softlimit = rps->min_freq;
	ret = gen5_rps_set(&i915->gt.rps, rps->min_freq);
	spin_unlock_irq(&mchdev_lock);

	drm_dev_put(&i915->drm);
	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_rps.c"
#endif