Lines Matching +full:max +full:- +full:freq (drivers/gpu/drm/i915/gt/intel_rps.c)

1 // SPDX-License-Identifier: MIT
43 return rps_to_gt(rps)->i915; in rps_to_i915()
48 return rps_to_gt(rps)->uncore; in rps_to_uncore()
55 return &gt_to_guc(gt)->slpc; in rps_to_slpc()
62 return intel_uc_uses_guc_slpc(&gt->uc); in rps_uses_slpc()
67 return mask & ~rps->pm_intrmsk_mbz; in rps_pm_sanitize_mask()
90 last = engine->stats.rps; in rps_timer()
91 engine->stats.rps = dt; in rps_timer()
99 last = rps->pm_timestamp; in rps_timer()
100 rps->pm_timestamp = timestamp; in rps_timer()
115 * video decode on vcs followed by colour post-processing in rps_timer()
116 * on vecs, followed by general post-processing on rcs. in rps_timer()
117 * Since multi-engines being active does imply a single in rps_timer()
130 "busy:%lld [%d%%], max:[%lld, %lld, %lld], interval:%d\n", in rps_timer()
133 rps->pm_interval); in rps_timer()
135 if (100 * busy > rps->power.up_threshold * dt && in rps_timer()
136 rps->cur_freq < rps->max_freq_softlimit) { in rps_timer()
137 rps->pm_iir |= GEN6_PM_RP_UP_THRESHOLD; in rps_timer()
138 rps->pm_interval = 1; in rps_timer()
139 queue_work(gt->i915->unordered_wq, &rps->work); in rps_timer()
140 } else if (100 * busy < rps->power.down_threshold * dt && in rps_timer()
141 rps->cur_freq > rps->min_freq_softlimit) { in rps_timer()
142 rps->pm_iir |= GEN6_PM_RP_DOWN_THRESHOLD; in rps_timer()
143 rps->pm_interval = 1; in rps_timer()
144 queue_work(gt->i915->unordered_wq, &rps->work); in rps_timer()
146 rps->last_adj = 0; in rps_timer()
149 mod_timer(&rps->timer, in rps_timer()
150 jiffies + msecs_to_jiffies(rps->pm_interval)); in rps_timer()
151 rps->pm_interval = min(rps->pm_interval * 2, BUSY_MAX_EI); in rps_timer()
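
The rps_timer() lines above drive the software up/down evaluation: GPU busy time over the last interval is compared, as a percentage, against power.up_threshold and power.down_threshold, and the evaluation interval doubles (up to BUSY_MAX_EI) while nothing changes. A minimal standalone sketch of that comparison, with illustrative names rather than the driver's structures (the driver additionally checks the current frequency against the soft limits before queuing its worker):

    #include <stdint.h>

    enum rps_action { RPS_HOLD, RPS_UP, RPS_DOWN };

    /* busy and dt share a time unit; the thresholds are percentages. */
    static enum rps_action rps_evaluate(uint64_t busy, uint64_t dt,
                                        unsigned int up_threshold,   /* e.g. 95 */
                                        unsigned int down_threshold) /* e.g. 85 */
    {
        if (100 * busy > (uint64_t)up_threshold * dt)
            return RPS_UP;      /* busier than up_threshold% of the interval */
        if (100 * busy < (uint64_t)down_threshold * dt)
            return RPS_DOWN;    /* idler than down_threshold% of the interval */
        return RPS_HOLD;
    }
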
157 rps->pm_timestamp = ktime_sub(ktime_get(), rps->pm_timestamp); in rps_start_timer()
158 rps->pm_interval = 1; in rps_start_timer()
159 mod_timer(&rps->timer, jiffies + 1); in rps_start_timer()
164 del_timer_sync(&rps->timer); in rps_stop_timer()
165 rps->pm_timestamp = ktime_sub(ktime_get(), rps->pm_timestamp); in rps_stop_timer()
166 cancel_work_sync(&rps->work); in rps_stop_timer()
174 if (val > rps->min_freq_softlimit) in rps_pm_mask()
179 if (val < rps->max_freq_softlimit) in rps_pm_mask()
182 mask &= rps->pm_events; in rps_pm_mask()
189 memset(&rps->ei, 0, sizeof(rps->ei)); in rps_reset_ei()
198 GT_TRACE(gt, "interrupts:on rps->pm_events: %x, rps_pm_mask:%x\n", in rps_enable_interrupts()
199 rps->pm_events, rps_pm_mask(rps, rps->last_freq)); in rps_enable_interrupts()
203 spin_lock_irq(gt->irq_lock); in rps_enable_interrupts()
204 gen6_gt_pm_enable_irq(gt, rps->pm_events); in rps_enable_interrupts()
205 spin_unlock_irq(gt->irq_lock); in rps_enable_interrupts()
207 intel_uncore_write(gt->uncore, in rps_enable_interrupts()
208 GEN6_PMINTRMSK, rps_pm_mask(rps, rps->last_freq)); in rps_enable_interrupts()
226 spin_lock_irq(gt->irq_lock); in rps_reset_interrupts()
227 if (GRAPHICS_VER(gt->i915) >= 11) in rps_reset_interrupts()
232 rps->pm_iir = 0; in rps_reset_interrupts()
233 spin_unlock_irq(gt->irq_lock); in rps_reset_interrupts()
240 intel_uncore_write(gt->uncore, in rps_disable_interrupts()
243 spin_lock_irq(gt->irq_lock); in rps_disable_interrupts()
245 spin_unlock_irq(gt->irq_lock); in rps_disable_interrupts()
247 intel_synchronize_irq(gt->i915); in rps_disable_interrupts()
255 cancel_work_sync(&rps->work); in rps_disable_interrupts()
283 if (i915->fsb_freq <= 3200000) in gen5_rps_init()
285 else if (i915->fsb_freq <= 4800000) in gen5_rps_init()
292 cparams[i].t == DIV_ROUND_CLOSEST(i915->mem_freq, 1000)) { in gen5_rps_init()
293 rps->ips.m = cparams[i].m; in gen5_rps_init()
294 rps->ips.c = cparams[i].c; in gen5_rps_init()
301 /* Set up min, max, and cur for interrupt handling */ in gen5_rps_init()
306 drm_dbg(&i915->drm, "fmax: %d, fmin: %d, fstart: %d\n", in gen5_rps_init()
309 rps->min_freq = fmax; in gen5_rps_init()
310 rps->efficient_freq = fstart; in gen5_rps_init()
311 rps->max_freq = fmin; in gen5_rps_init()
326 * Prevent division-by-zero if we are asking too fast. in __ips_chipset_val()
331 dt = now - ips->last_time1; in __ips_chipset_val()
333 return ips->chipset_power; in __ips_chipset_val()
335 /* FIXME: handle per-counter overflow */ in __ips_chipset_val()
340 delta = total - ips->last_count1; in __ips_chipset_val()
342 result = div_u64(div_u64(ips->m * delta, dt) + ips->c, 10); in __ips_chipset_val()
344 ips->last_count1 = total; in __ips_chipset_val()
345 ips->last_time1 = now; in __ips_chipset_val()
347 ips->chipset_power = result; in __ips_chipset_val()
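
The __ips_chipset_val() lines above estimate chipset power as a linear fit over the energy counters, using the m and c coefficients that gen5_rps_init() picked from the cparams table: result = (m * delta / dt + c) / 10. A simplified standalone version of that arithmetic (illustrative types; the matched comment notes that per-counter overflow is not handled):

    #include <stdint.h>

    /*
     * delta: change in the summed energy counters since the last sample
     * dt:    time since the last sample, in the driver's unit
     * m, c:  per-platform coefficients (cparams table)
     *
     * The caller must guard against a tiny dt; the driver returns its
     * cached value instead of dividing in that case.
     */
    static uint64_t ips_chipset_power(uint64_t m, uint64_t c,
                                      uint64_t delta, uint64_t dt)
    {
        return (m * delta / dt + c) / 10;
    }
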
363 return m * x / 127 - b; in ips_mch_val()
381 if (INTEL_INFO(i915)->is_mobile) in pvid_to_extvid()
382 return max(vd - 1125, 0); in pvid_to_extvid()
397 dt = now - ips->last_time2; in __gen5_ips_update()
405 delta = count - ips->last_count2; in __gen5_ips_update()
407 ips->last_count2 = count; in __gen5_ips_update()
408 ips->last_time2 = now; in __gen5_ips_update()
411 ips->gfx_power = div_u64(delta * 1181, dt * 10); in __gen5_ips_update()
417 __gen5_ips_update(&rps->ips); in gen5_rps_update()
425 val = rps->max_freq - val; in gen5_invert_freq()
426 val = rps->min_freq + val; in gen5_invert_freq()
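
gen5_invert_freq() above reflects a value within [min_freq, max_freq]. The gen5_rps_init() lines earlier show why: min_freq is loaded from fmax and max_freq from fmin, so the hardware encoding runs opposite to the driver's naming and conversions between the two need a mirror operation. A one-line illustration with hypothetical numbers:

    /* Reflect val within [lo, hi]: lo maps to hi, hi maps to lo. */
    static int mirror_in_range(int lo, int hi, int val)
    {
        return lo + (hi - val);     /* e.g. lo=1, hi=5, val=2 -> 4 */
    }
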
440 drm_dbg(&rps_to_i915(rps)->drm, in __gen5_rps_set()
442 return -EBUSY; /* still busy with another command */ in __gen5_rps_set()
504 /* Program P-state weights to account for frequency power adjustment */ in init_emon()
507 unsigned int freq = intel_pxfreq(pxvidfreq); in init_emon() local
512 val = vid * vid * freq / 1000 * 255; in init_emon()
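
The init_emon() line above builds a P-state weight that grows with vid² × freq, i.e. with the usual V²·f scaling of dynamic power. A standalone copy of the expression, keeping the operand order so the integer divisions round the same way (names are illustrative):

    /* Weight proportional to vid^2 * freq (dynamic power ~ V^2 * f). */
    static unsigned long emon_pstate_weight(unsigned long vid, unsigned long freq)
    {
        return vid * vid * freq / 1000 * 255;
    }
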
571 /* Set max/min thresholds to 90ms and 80ms respectively */ in gen5_rps_enable()
577 /* Set up min, max, and cur for interrupt handling */ in gen5_rps_enable()
596 drm_err(&uncore->i915->drm, in gen5_rps_enable()
600 __gen5_rps_set(rps, rps->cur_freq); in gen5_rps_enable()
602 rps->ips.last_count1 = intel_uncore_read(uncore, DMIEC); in gen5_rps_enable()
603 rps->ips.last_count1 += intel_uncore_read(uncore, DDREC); in gen5_rps_enable()
604 rps->ips.last_count1 += intel_uncore_read(uncore, CSIEC); in gen5_rps_enable()
605 rps->ips.last_time1 = jiffies_to_msecs(jiffies); in gen5_rps_enable()
607 rps->ips.last_count2 = intel_uncore_read(uncore, GFXEC); in gen5_rps_enable()
608 rps->ips.last_time2 = ktime_get_raw_ns(); in gen5_rps_enable()
610 spin_lock(&i915->irq_lock); in gen5_rps_enable()
612 spin_unlock(&i915->irq_lock); in gen5_rps_enable()
616 rps->ips.corr = init_emon(uncore); in gen5_rps_enable()
629 spin_lock(&i915->irq_lock); in gen5_rps_disable()
631 spin_unlock(&i915->irq_lock); in gen5_rps_disable()
640 __gen5_rps_set(rps, rps->idle_freq); in gen5_rps_disable()
662 limits = rps->max_freq_softlimit << 23; in rps_limits()
663 if (val <= rps->min_freq_softlimit) in rps_limits()
664 limits |= rps->min_freq_softlimit << 14; in rps_limits()
666 limits = rps->max_freq_softlimit << 24; in rps_limits()
667 if (val <= rps->min_freq_softlimit) in rps_limits()
668 limits |= rps->min_freq_softlimit << 16; in rps_limits()
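
The two rps_limits() branches above pack the soft limits into the RP limits register: one layout puts the soft maximum at bit 23 and the soft minimum at bit 14, the other at bits 24 and 16, and the down limit is only armed once the requested value has already reached the soft minimum. A sketch of that packing; the field positions come from the matched lines, while the platform split is reduced to a flag:

    #include <stdint.h>

    static uint32_t pack_rps_limits(int fields_at_23_14,
                                    uint8_t softmax, uint8_t softmin, uint8_t val)
    {
        uint32_t limits;

        if (fields_at_23_14) {
            limits = (uint32_t)softmax << 23;
            if (val <= softmin)         /* arm the down limit only at the floor */
                limits |= (uint32_t)softmin << 14;
        } else {
            limits = (uint32_t)softmax << 24;
            if (val <= softmin)
                limits |= (uint32_t)softmin << 16;
        }

        return limits;
    }
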
677 struct intel_uncore *uncore = gt->uncore; in rps_set_power()
680 lockdep_assert_held(&rps->power.mutex); in rps_set_power()
682 if (new_power == rps->power.mode) in rps_set_power()
704 * sw freq adjustments, this restriction can be lifted. in rps_set_power()
706 if (IS_VALLEYVIEW(gt->i915)) in rps_set_power()
712 rps->power.up_threshold, ei_up, in rps_set_power()
713 rps->power.down_threshold, ei_down); in rps_set_power()
719 ei_up * rps->power.up_threshold * 10)); in rps_set_power()
726 rps->power.down_threshold * 10)); in rps_set_power()
729 (GRAPHICS_VER(gt->i915) > 9 ? 0 : GEN6_RP_MEDIA_TURBO) | in rps_set_power()
737 rps->power.mode = new_power; in rps_set_power()
744 new_power = rps->power.mode; in gen6_rps_set_thresholds()
745 switch (rps->power.mode) { in gen6_rps_set_thresholds()
747 if (val > rps->efficient_freq + 1 && in gen6_rps_set_thresholds()
748 val > rps->cur_freq) in gen6_rps_set_thresholds()
753 if (val <= rps->efficient_freq && in gen6_rps_set_thresholds()
754 val < rps->cur_freq) in gen6_rps_set_thresholds()
756 else if (val >= rps->rp0_freq && in gen6_rps_set_thresholds()
757 val > rps->cur_freq) in gen6_rps_set_thresholds()
762 if (val < (rps->rp1_freq + rps->rp0_freq) >> 1 && in gen6_rps_set_thresholds()
763 val < rps->cur_freq) in gen6_rps_set_thresholds()
767 /* Max/min bins are special */ in gen6_rps_set_thresholds()
768 if (val <= rps->min_freq_softlimit) in gen6_rps_set_thresholds()
770 if (val >= rps->max_freq_softlimit) in gen6_rps_set_thresholds()
773 mutex_lock(&rps->power.mutex); in gen6_rps_set_thresholds()
774 if (rps->power.interactive) in gen6_rps_set_thresholds()
777 mutex_unlock(&rps->power.mutex); in gen6_rps_set_thresholds()
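
gen6_rps_set_thresholds() above picks one of three power modes from where the requested frequency sits relative to the efficient (RPe/RP1) and RP0 points, with hysteresis so a mode is only left when the request also crosses the current frequency in the same direction; the soft min/max bins are pinned to the extreme modes, and the interactive flag checked at the end appears to force the high-power mode. A simplified standalone model (the case labels are inferred, not shown in the matched lines):

    enum rps_power_mode { LOW_POWER, BETWEEN, HIGH_POWER };

    struct freq_points {
        int cur, efficient, rp1, rp0, softmin, softmax;
    };

    static enum rps_power_mode pick_power_mode(enum rps_power_mode mode,
                                               const struct freq_points *f,
                                               int val)
    {
        switch (mode) {
        case LOW_POWER:
            if (val > f->efficient + 1 && val > f->cur)
                mode = BETWEEN;
            break;
        case BETWEEN:
            if (val <= f->efficient && val < f->cur)
                mode = LOW_POWER;
            else if (val >= f->rp0 && val > f->cur)
                mode = HIGH_POWER;
            break;
        case HIGH_POWER:
            if (val < (f->rp1 + f->rp0) / 2 && val < f->cur)
                mode = BETWEEN;
            break;
        }

        /* Max/min bins are special: pin them to the extreme modes. */
        if (val <= f->softmin)
            mode = LOW_POWER;
        if (val >= f->softmax)
            mode = HIGH_POWER;

        return mode;
    }
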
785 mutex_lock(&rps->power.mutex); in intel_rps_mark_interactive()
787 if (!rps->power.interactive++ && intel_rps_is_active(rps)) in intel_rps_mark_interactive()
790 GEM_BUG_ON(!rps->power.interactive); in intel_rps_mark_interactive()
791 rps->power.interactive--; in intel_rps_mark_interactive()
793 mutex_unlock(&rps->power.mutex); in intel_rps_mark_interactive()
814 GT_TRACE(rps_to_gt(rps), "set val:%x, freq:%d, swreq:%x\n", in gen6_rps_set()
829 GT_TRACE(rps_to_gt(rps), "set val:%x, freq:%d\n", in vlv_rps_set()
840 if (val == rps->last_freq) in rps_set()
854 rps->last_freq = val; in rps_set()
864 GT_TRACE(rps_to_gt(rps), "unpark:%x\n", rps->cur_freq); in intel_rps_unpark()
870 mutex_lock(&rps->lock); in intel_rps_unpark()
874 clamp(rps->cur_freq, in intel_rps_unpark()
875 rps->min_freq_softlimit, in intel_rps_unpark()
876 rps->max_freq_softlimit)); in intel_rps_unpark()
878 mutex_unlock(&rps->lock); in intel_rps_unpark()
880 rps->pm_iir = 0; in intel_rps_unpark()
905 if (rps->last_freq <= rps->idle_freq) in intel_rps_park()
922 rps_set(rps, rps->idle_freq, false); in intel_rps_park()
935 adj = rps->last_adj; in intel_rps_park()
939 adj = -2; in intel_rps_park()
940 rps->last_adj = adj; in intel_rps_park()
941 rps->cur_freq = max_t(int, rps->cur_freq + adj, rps->min_freq); in intel_rps_park()
942 if (rps->cur_freq < rps->efficient_freq) { in intel_rps_park()
943 rps->cur_freq = rps->efficient_freq; in intel_rps_park()
944 rps->last_adj = 0; in intel_rps_park()
947 GT_TRACE(rps_to_gt(rps), "park:%x\n", rps->cur_freq); in intel_rps_park()
957 return slpc->boost_freq; in intel_rps_get_boost_frequency()
959 return intel_gpu_freq(rps, rps->boost_freq); in intel_rps_get_boost_frequency()
969 if (val < rps->min_freq || val > rps->max_freq) in rps_set_boost_freq()
970 return -EINVAL; in rps_set_boost_freq()
972 mutex_lock(&rps->lock); in rps_set_boost_freq()
973 if (val != rps->boost_freq) { in rps_set_boost_freq()
974 rps->boost_freq = val; in rps_set_boost_freq()
975 boost = atomic_read(&rps->num_waiters); in rps_set_boost_freq()
977 mutex_unlock(&rps->lock); in rps_set_boost_freq()
979 queue_work(rps_to_gt(rps)->i915->unordered_wq, &rps->work); in rps_set_boost_freq()
984 int intel_rps_set_boost_frequency(struct intel_rps *rps, u32 freq) in intel_rps_set_boost_frequency() argument
991 return intel_guc_slpc_set_boost_freq(slpc, freq); in intel_rps_set_boost_frequency()
993 return rps_set_boost_freq(rps, freq); in intel_rps_set_boost_frequency()
1006 atomic_dec(&rps->num_waiters); in intel_rps_dec_waiters()
1017 /* Waitboost is not needed for contexts marked with a Freq hint */ in intel_rps_boost()
1018 if (test_bit(CONTEXT_LOW_LATENCY, &rq->context->flags)) in intel_rps_boost()
1022 if (!test_and_set_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags)) { in intel_rps_boost()
1023 struct intel_rps *rps = &READ_ONCE(rq->engine)->gt->rps; in intel_rps_boost()
1028 if (slpc->min_freq_softlimit >= slpc->boost_freq) in intel_rps_boost()
1032 if (!atomic_fetch_inc(&slpc->num_waiters)) { in intel_rps_boost()
1034 rq->fence.context, rq->fence.seqno); in intel_rps_boost()
1035 queue_work(rps_to_gt(rps)->i915->unordered_wq, in intel_rps_boost()
1036 &slpc->boost_work); in intel_rps_boost()
1042 if (atomic_fetch_inc(&rps->num_waiters)) in intel_rps_boost()
1049 rq->fence.context, rq->fence.seqno); in intel_rps_boost()
1051 if (READ_ONCE(rps->cur_freq) < rps->boost_freq) in intel_rps_boost()
1052 queue_work(rps_to_gt(rps)->i915->unordered_wq, &rps->work); in intel_rps_boost()
1054 WRITE_ONCE(rps->boosts, rps->boosts + 1); /* debug only */ in intel_rps_boost()
1062 lockdep_assert_held(&rps->lock); in intel_rps_set()
1063 GEM_BUG_ON(val > rps->max_freq); in intel_rps_set()
1064 GEM_BUG_ON(val < rps->min_freq); in intel_rps_set()
1085 rps->cur_freq = val; in intel_rps_set()
1104 u32 rp_state_cap = rps_to_gt(rps)->type == GT_MEDIA ? in mtl_get_freq_caps()
1107 u32 rpe = rps_to_gt(rps)->type == GT_MEDIA ? in mtl_get_freq_caps()
1112 caps->rp0_freq = REG_FIELD_GET(MTL_RP0_CAP_MASK, rp_state_cap); in mtl_get_freq_caps()
1113 caps->min_freq = REG_FIELD_GET(MTL_RPN_CAP_MASK, rp_state_cap); in mtl_get_freq_caps()
1114 caps->rp1_freq = REG_FIELD_GET(MTL_RPE_MASK, rpe); in mtl_get_freq_caps()
1127 caps->rp0_freq = (rp_state_cap >> 16) & 0xff; in __gen6_rps_get_freq_caps()
1128 caps->rp1_freq = (rp_state_cap >> 8) & 0xff; in __gen6_rps_get_freq_caps()
1129 caps->min_freq = (rp_state_cap >> 0) & 0xff; in __gen6_rps_get_freq_caps()
1131 caps->rp0_freq = (rp_state_cap >> 0) & 0xff; in __gen6_rps_get_freq_caps()
1133 caps->rp1_freq = REG_FIELD_GET(RPE_MASK, in __gen6_rps_get_freq_caps()
1134 intel_uncore_read(to_gt(i915)->uncore, in __gen6_rps_get_freq_caps()
1137 caps->rp1_freq = (rp_state_cap >> 8) & 0xff; in __gen6_rps_get_freq_caps()
1138 caps->min_freq = (rp_state_cap >> 16) & 0xff; in __gen6_rps_get_freq_caps()
1147 caps->rp0_freq *= GEN9_FREQ_SCALER; in __gen6_rps_get_freq_caps()
1148 caps->rp1_freq *= GEN9_FREQ_SCALER; in __gen6_rps_get_freq_caps()
1149 caps->min_freq *= GEN9_FREQ_SCALER; in __gen6_rps_get_freq_caps()
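
__gen6_rps_get_freq_caps() above decodes RP_STATE_CAP: RP0, RP1 and RPn are 8-bit fields whose byte order differs between platforms (and on some parts RPe comes from a separate register instead, as the matched lines show), and on newer generations the raw values are multiplied by GEN9_FREQ_SCALER to bring them onto the same scale as older parts. A sketch of the decode; the platform split is reduced to a flag and the scaler is passed in:

    #include <stdint.h>

    struct freq_caps { unsigned int rp0, rp1, rpn; };

    static struct freq_caps decode_rp_state_cap(uint32_t cap, int rp0_in_high_byte,
                                                unsigned int scaler /* 1, or GEN9_FREQ_SCALER */)
    {
        struct freq_caps c;

        if (rp0_in_high_byte) {
            c.rp0 = (cap >> 16) & 0xff;
            c.rp1 = (cap >> 8) & 0xff;
            c.rpn = (cap >> 0) & 0xff;
        } else {
            c.rp0 = (cap >> 0) & 0xff;
            c.rp1 = (cap >> 8) & 0xff;
            c.rpn = (cap >> 16) & 0xff;
        }

        c.rp0 *= scaler;
        c.rp1 *= scaler;
        c.rpn *= scaler;
        return c;
    }
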
1154 * gen6_rps_get_freq_caps - Get freq caps exposed by HW
1156 * @caps: returned freq caps
1177 rps->rp0_freq = caps.rp0_freq; in gen6_rps_init()
1178 rps->rp1_freq = caps.rp1_freq; in gen6_rps_init()
1179 rps->min_freq = caps.min_freq; in gen6_rps_init()
1182 rps->max_freq = rps->rp0_freq; in gen6_rps_init()
1184 rps->efficient_freq = rps->rp1_freq; in gen6_rps_init()
1192 if (snb_pcode_read(rps_to_gt(rps)->uncore, in gen6_rps_init()
1195 rps->efficient_freq = in gen6_rps_init()
1198 rps->min_freq, in gen6_rps_init()
1199 rps->max_freq); in gen6_rps_init()
1208 rps->power.mode = -1; in rps_reset()
1209 rps->last_freq = -1; in rps_reset()
1211 if (rps_set(rps, rps->min_freq, true)) { in rps_reset()
1212 drm_err(&i915->drm, "Failed to reset RPS to initial values\n"); in rps_reset()
1216 rps->cur_freq = rps->min_freq; in rps_reset()
1224 struct intel_uncore *uncore = gt->uncore; in gen9_rps_enable()
1227 if (GRAPHICS_VER(gt->i915) == 9) in gen9_rps_enable()
1229 GEN9_FREQUENCY(rps->rp1_freq)); in gen9_rps_enable()
1233 rps->pm_events = GEN6_PM_RP_UP_THRESHOLD | GEN6_PM_RP_DOWN_THRESHOLD; in gen9_rps_enable()
1243 HSW_FREQUENCY(rps->rp1_freq)); in gen8_rps_enable()
1247 rps->pm_events = GEN6_PM_RP_UP_THRESHOLD | GEN6_PM_RP_DOWN_THRESHOLD; in gen8_rps_enable()
1260 rps->pm_events = (GEN6_PM_RP_UP_THRESHOLD | in gen6_rps_enable()
1275 switch (gt->info.sseu.eu_total) { in chv_rps_max_freq()
1350 rps->pm_events = (GEN6_PM_RP_UP_THRESHOLD | in chv_rps_enable()
1365 drm_WARN_ONCE(&i915->drm, (val & GPLLENABLE) == 0, in chv_rps_enable()
1368 drm_dbg(&i915->drm, "GPLL enabled? %s\n", in chv_rps_enable()
1370 drm_dbg(&i915->drm, "GPU status: 0x%08x\n", val); in chv_rps_enable()
1396 /* Clamp to max */ in vlv_rps_max_freq()
1424 * a BYT-M B0 the above register contains 0xbf. Moreover when setting in vlv_rps_min_freq()
1454 rps->pm_events = GEN6_PM_RP_UP_EI_EXPIRED; in vlv_rps_enable()
1467 drm_WARN_ONCE(&i915->drm, (val & GPLLENABLE) == 0, in vlv_rps_enable()
1470 drm_dbg(&i915->drm, "GPLL enabled? %s\n", in vlv_rps_enable()
1472 drm_dbg(&i915->drm, "GPU status: 0x%08x\n", val); in vlv_rps_enable()
1487 pxvid = intel_uncore_read(uncore, PXVFREQ(rps->cur_freq)); in __ips_gfx_val()
1504 corr = div_u64(corr * 150142 * state1, 10000) - 78642; in __ips_gfx_val()
1505 corr2 = div_u64(corr, 100000) * ips->corr; in __ips_gfx_val()
1512 return ips->gfx_power + state2; in __ips_gfx_val()
1543 if (rps->max_freq <= rps->min_freq) in intel_rps_enable()
1564 "min:%x, max:%x, freq:[%d, %d], thresholds:[%u, %u]\n", in intel_rps_enable()
1565 rps->min_freq, rps->max_freq, in intel_rps_enable()
1566 intel_gpu_freq(rps, rps->min_freq), in intel_rps_enable()
1567 intel_gpu_freq(rps, rps->max_freq), in intel_rps_enable()
1568 rps->power.up_threshold, in intel_rps_enable()
1569 rps->power.down_threshold); in intel_rps_enable()
1571 GEM_BUG_ON(rps->max_freq < rps->min_freq); in intel_rps_enable()
1572 GEM_BUG_ON(rps->idle_freq > rps->max_freq); in intel_rps_enable()
1574 GEM_BUG_ON(rps->efficient_freq < rps->min_freq); in intel_rps_enable()
1575 GEM_BUG_ON(rps->efficient_freq > rps->max_freq); in intel_rps_enable()
1612 * N = val - 0xb7 in byt_gpu_freq()
1615 return DIV_ROUND_CLOSEST(rps->gpll_ref_freq * (val - 0xb7), 1000); in byt_gpu_freq()
1620 return DIV_ROUND_CLOSEST(1000 * val, rps->gpll_ref_freq) + 0xb7; in byt_freq_opcode()
1629 return DIV_ROUND_CLOSEST(rps->gpll_ref_freq * val, 2 * 2 * 1000); in chv_gpu_freq()
1635 return DIV_ROUND_CLOSEST(2 * 1000 * val, rps->gpll_ref_freq) * 2; in chv_freq_opcode()
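
byt_gpu_freq() and byt_freq_opcode() above convert between the PUNIT frequency opcode and MHz: the opcode is 0xb7 plus a multiple of the GPLL reference clock (reported in kHz by vlv_init_gpll_ref_freq() below), while the Cherryview pair uses a 2 * 2 * 1000 divisor and rounds the opcode to an even value. A standalone round-trip of the Baytrail pair with DIV_ROUND_CLOSEST spelled out:

    #include <stdint.h>

    static uint32_t div_round_closest(uint32_t n, uint32_t d)
    {
        return (n + d / 2) / d;
    }

    /* mirrors: DIV_ROUND_CLOSEST(gpll_ref_freq * (val - 0xb7), 1000) */
    static uint32_t byt_opcode_to_mhz(uint32_t gpll_ref_khz, uint32_t opcode)
    {
        return div_round_closest(gpll_ref_khz * (opcode - 0xb7), 1000);
    }

    /* mirrors: DIV_ROUND_CLOSEST(1000 * val, gpll_ref_freq) + 0xb7 */
    static uint32_t byt_mhz_to_opcode(uint32_t gpll_ref_khz, uint32_t mhz)
    {
        return div_round_closest(1000 * mhz, gpll_ref_khz) + 0xb7;
    }
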
1676 rps->gpll_ref_freq = in vlv_init_gpll_ref_freq()
1679 i915->czclk_freq); in vlv_init_gpll_ref_freq()
1681 drm_dbg(&i915->drm, "GPLL reference freq: %d kHz\n", in vlv_init_gpll_ref_freq()
1682 rps->gpll_ref_freq); in vlv_init_gpll_ref_freq()
1696 rps->max_freq = vlv_rps_max_freq(rps); in vlv_rps_init()
1697 rps->rp0_freq = rps->max_freq; in vlv_rps_init()
1698 drm_dbg(&i915->drm, "max GPU freq: %d MHz (%u)\n", in vlv_rps_init()
1699 intel_gpu_freq(rps, rps->max_freq), rps->max_freq); in vlv_rps_init()
1701 rps->efficient_freq = vlv_rps_rpe_freq(rps); in vlv_rps_init()
1702 drm_dbg(&i915->drm, "RPe GPU freq: %d MHz (%u)\n", in vlv_rps_init()
1703 intel_gpu_freq(rps, rps->efficient_freq), rps->efficient_freq); in vlv_rps_init()
1705 rps->rp1_freq = vlv_rps_guar_freq(rps); in vlv_rps_init()
1706 drm_dbg(&i915->drm, "RP1(Guar Freq) GPU freq: %d MHz (%u)\n", in vlv_rps_init()
1707 intel_gpu_freq(rps, rps->rp1_freq), rps->rp1_freq); in vlv_rps_init()
1709 rps->min_freq = vlv_rps_min_freq(rps); in vlv_rps_init()
1710 drm_dbg(&i915->drm, "min GPU freq: %d MHz (%u)\n", in vlv_rps_init()
1711 intel_gpu_freq(rps, rps->min_freq), rps->min_freq); in vlv_rps_init()
1730 rps->max_freq = chv_rps_max_freq(rps); in chv_rps_init()
1731 rps->rp0_freq = rps->max_freq; in chv_rps_init()
1732 drm_dbg(&i915->drm, "max GPU freq: %d MHz (%u)\n", in chv_rps_init()
1733 intel_gpu_freq(rps, rps->max_freq), rps->max_freq); in chv_rps_init()
1735 rps->efficient_freq = chv_rps_rpe_freq(rps); in chv_rps_init()
1736 drm_dbg(&i915->drm, "RPe GPU freq: %d MHz (%u)\n", in chv_rps_init()
1737 intel_gpu_freq(rps, rps->efficient_freq), rps->efficient_freq); in chv_rps_init()
1739 rps->rp1_freq = chv_rps_guar_freq(rps); in chv_rps_init()
1740 drm_dbg(&i915->drm, "RP1(Guar) GPU freq: %d MHz (%u)\n", in chv_rps_init()
1741 intel_gpu_freq(rps, rps->rp1_freq), rps->rp1_freq); in chv_rps_init()
1743 rps->min_freq = chv_rps_min_freq(rps); in chv_rps_init()
1744 drm_dbg(&i915->drm, "min GPU freq: %d MHz (%u)\n", in chv_rps_init()
1745 intel_gpu_freq(rps, rps->min_freq), rps->min_freq); in chv_rps_init()
1752 drm_WARN_ONCE(&i915->drm, (rps->max_freq | rps->efficient_freq | in chv_rps_init()
1753 rps->rp1_freq | rps->min_freq) & 1, in chv_rps_init()
1754 "Odd GPU freq values\n"); in chv_rps_init()
1759 ei->ktime = ktime_get_raw(); in vlv_c0_read()
1760 ei->render_c0 = intel_uncore_read(uncore, VLV_RENDER_C0_COUNT); in vlv_c0_read()
1761 ei->media_c0 = intel_uncore_read(uncore, VLV_MEDIA_C0_COUNT); in vlv_c0_read()
1767 const struct intel_rps_ei *prev = &rps->ei; in vlv_wa_c0_ei()
1776 if (prev->ktime) { in vlv_wa_c0_ei()
1780 time = ktime_us_delta(now.ktime, prev->ktime); in vlv_wa_c0_ei()
1782 time *= rps_to_i915(rps)->czclk_freq; in vlv_wa_c0_ei()
1789 render = now.render_c0 - prev->render_c0; in vlv_wa_c0_ei()
1790 media = now.media_c0 - prev->media_c0; in vlv_wa_c0_ei()
1791 c0 = max(render, media); in vlv_wa_c0_ei()
1794 if (c0 > time * rps->power.up_threshold) in vlv_wa_c0_ei()
1796 else if (c0 < time * rps->power.down_threshold) in vlv_wa_c0_ei()
1800 rps->ei = now; in vlv_wa_c0_ei()
1810 int new_freq, adj, min, max; in rps_work() local
1813 spin_lock_irq(gt->irq_lock); in rps_work()
1814 pm_iir = fetch_and_zero(&rps->pm_iir) & rps->pm_events; in rps_work()
1815 client_boost = atomic_read(&rps->num_waiters); in rps_work()
1816 spin_unlock_irq(gt->irq_lock); in rps_work()
1822 mutex_lock(&rps->lock); in rps_work()
1824 mutex_unlock(&rps->lock); in rps_work()
1830 adj = rps->last_adj; in rps_work()
1831 new_freq = rps->cur_freq; in rps_work()
1832 min = rps->min_freq_softlimit; in rps_work()
1833 max = rps->max_freq_softlimit; in rps_work()
1835 max = rps->max_freq; in rps_work()
1838 "pm_iir:%x, client_boost:%s, last:%d, cur:%x, min:%x, max:%x\n", in rps_work()
1840 adj, new_freq, min, max); in rps_work()
1842 if (client_boost && new_freq < rps->boost_freq) { in rps_work()
1843 new_freq = rps->boost_freq; in rps_work()
1849 adj = IS_CHERRYVIEW(gt->i915) ? 2 : 1; in rps_work()
1851 if (new_freq >= rps->max_freq_softlimit) in rps_work()
1856 if (rps->cur_freq > rps->efficient_freq) in rps_work()
1857 new_freq = rps->efficient_freq; in rps_work()
1858 else if (rps->cur_freq > rps->min_freq_softlimit) in rps_work()
1859 new_freq = rps->min_freq_softlimit; in rps_work()
1865 adj = IS_CHERRYVIEW(gt->i915) ? -2 : -1; in rps_work()
1867 if (new_freq <= rps->min_freq_softlimit) in rps_work()
1878 new_freq = clamp_t(int, new_freq, min, max); in rps_work()
1881 drm_dbg(&i915->drm, "Failed to set new GPU frequency\n"); in rps_work()
1884 rps->last_adj = adj; in rps_work()
1886 mutex_unlock(&rps->lock); in rps_work()
1889 spin_lock_irq(gt->irq_lock); in rps_work()
1890 gen6_gt_pm_unmask_irq(gt, rps->pm_events); in rps_work()
1891 spin_unlock_irq(gt->irq_lock); in rps_work()
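
rps_work() above is the interrupt-driven frequency walk: a client boost jumps straight to boost_freq, consecutive up or down threshold events double the previous step (starting from ±1, or ±2 on Cherryview), a down timeout falls back to the efficient or soft-minimum frequency, the step resets once a soft limit is reached, and the result is clamped to [min, max] before being applied. A simplified model of the stepping, with the event handling reduced to a single enum (not the driver's code):

    enum rps_event { RPS_EV_UP, RPS_EV_DOWN, RPS_EV_NONE };

    static int clamp_int(int v, int lo, int hi)
    {
        return v < lo ? lo : (v > hi ? hi : v);
    }

    /* unit is the smallest step: 1, or 2 where only even values are valid. */
    static int rps_step(int cur, int *last_adj, enum rps_event ev,
                        int softmin, int softmax, int unit)
    {
        int adj = *last_adj;

        if (ev == RPS_EV_UP) {
            adj = adj > 0 ? adj * 2 : unit;     /* keep doubling while rising */
            if (cur >= softmax)
                adj = 0;
        } else if (ev == RPS_EV_DOWN) {
            adj = adj < 0 ? adj * 2 : -unit;    /* keep doubling while falling */
            if (cur <= softmin)
                adj = 0;
        } else {
            adj = 0;
        }

        *last_adj = adj;
        return clamp_int(cur + adj, softmin, softmax);
    }
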
1897 const u32 events = rps->pm_events & pm_iir; in gen11_rps_irq_handler()
1899 lockdep_assert_held(gt->irq_lock); in gen11_rps_irq_handler()
1908 rps->pm_iir |= events; in gen11_rps_irq_handler()
1909 queue_work(gt->i915->unordered_wq, &rps->work); in gen11_rps_irq_handler()
1917 events = pm_iir & rps->pm_events; in gen6_rps_irq_handler()
1919 spin_lock(gt->irq_lock); in gen6_rps_irq_handler()
1924 rps->pm_iir |= events; in gen6_rps_irq_handler()
1926 queue_work(gt->i915->unordered_wq, &rps->work); in gen6_rps_irq_handler()
1927 spin_unlock(gt->irq_lock); in gen6_rps_irq_handler()
1930 if (GRAPHICS_VER(gt->i915) >= 8) in gen6_rps_irq_handler()
1934 intel_engine_cs_irq(gt->engine[VECS0], pm_iir >> 10); in gen6_rps_irq_handler()
1937 drm_dbg(&rps_to_i915(rps)->drm, in gen6_rps_irq_handler()
1960 new_freq = rps->cur_freq; in gen5_rps_irq_handler()
1964 new_freq--; in gen5_rps_irq_handler()
1966 rps->min_freq_softlimit, in gen5_rps_irq_handler()
1967 rps->max_freq_softlimit); in gen5_rps_irq_handler()
1969 if (new_freq != rps->cur_freq && !__gen5_rps_set(rps, new_freq)) in gen5_rps_irq_handler()
1970 rps->cur_freq = new_freq; in gen5_rps_irq_handler()
1977 mutex_init(&rps->lock); in intel_rps_init_early()
1978 mutex_init(&rps->power.mutex); in intel_rps_init_early()
1980 INIT_WORK(&rps->work, rps_work); in intel_rps_init_early()
1981 timer_setup(&rps->timer, rps_timer, 0); in intel_rps_init_early()
1983 atomic_set(&rps->num_waiters, 0); in intel_rps_init_early()
2003 rps->max_freq_softlimit = rps->max_freq; in intel_rps_init()
2004 rps_to_gt(rps)->defaults.max_freq = rps->max_freq_softlimit; in intel_rps_init()
2005 rps->min_freq_softlimit = rps->min_freq; in intel_rps_init()
2006 rps_to_gt(rps)->defaults.min_freq = rps->min_freq_softlimit; in intel_rps_init()
2008 /* After setting max-softlimit, find the overclock max freq */ in intel_rps_init()
2012 snb_pcode_read(rps_to_gt(rps)->uncore, GEN6_READ_OC_PARAMS, &params, NULL); in intel_rps_init()
2014 drm_dbg(&i915->drm, in intel_rps_init()
2015 "Overclocking supported, max: %dMHz, overclock: %dMHz\n", in intel_rps_init()
2016 (rps->max_freq & 0xff) * 50, in intel_rps_init()
2018 rps->max_freq = params & 0xff; in intel_rps_init()
2023 rps->power.up_threshold = 95; in intel_rps_init()
2024 rps_to_gt(rps)->defaults.rps_up_threshold = rps->power.up_threshold; in intel_rps_init()
2025 rps->power.down_threshold = 85; in intel_rps_init()
2026 rps_to_gt(rps)->defaults.rps_down_threshold = rps->power.down_threshold; in intel_rps_init()
2028 /* Finally allow us to boost to max by default */ in intel_rps_init()
2029 rps->boost_freq = rps->max_freq; in intel_rps_init()
2030 rps->idle_freq = rps->min_freq; in intel_rps_init()
2033 rps->cur_freq = rps->efficient_freq; in intel_rps_init()
2035 rps->pm_intrmsk_mbz = 0; in intel_rps_init()
2044 rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED; in intel_rps_init()
2047 rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC; in intel_rps_init()
2050 if (intel_uc_uses_guc_submission(&rps_to_gt(rps)->uc)) in intel_rps_init()
2051 rps->pm_intrmsk_mbz |= ARAT_EXPIRED_INTRMSK; in intel_rps_init()
2070 return intel_uncore_read(rps_to_gt(rps)->uncore, rpstat); in intel_rps_read_rpstat()
2101 u32 freq; in __read_cagf() local
2104 * For Gen12+ reading freq from HW does not need a forcewake and in __read_cagf()
2105 * registers will return 0 freq when GT is in RC6 in __read_cagf()
2113 freq = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS); in __read_cagf()
2122 freq = take_fw ? intel_uncore_read(uncore, r) : intel_uncore_read_fw(uncore, r); in __read_cagf()
2124 return intel_rps_get_cagf(rps, freq); in __read_cagf()
2134 struct intel_runtime_pm *rpm = rps_to_uncore(rps)->rpm; in intel_rps_read_actual_frequency()
2136 u32 freq = 0; in intel_rps_read_actual_frequency() local
2139 freq = intel_gpu_freq(rps, read_cagf(rps)); in intel_rps_read_actual_frequency()
2141 return freq; in intel_rps_read_actual_frequency()
2152 struct intel_runtime_pm *rpm = rps_to_uncore(rps)->rpm; in intel_rps_read_punit_req()
2154 u32 freq = 0; in intel_rps_read_punit_req() local
2157 freq = intel_uncore_read(uncore, GEN6_RPNSWREQ); in intel_rps_read_punit_req()
2159 return freq; in intel_rps_read_punit_req()
2171 u32 freq = intel_rps_get_req(intel_rps_read_punit_req(rps)); in intel_rps_read_punit_req_frequency() local
2173 return intel_gpu_freq(rps, freq); in intel_rps_read_punit_req_frequency()
2181 return intel_gpu_freq(rps, rps->cur_freq); in intel_rps_get_requested_frequency()
2189 return slpc->max_freq_softlimit; in intel_rps_get_max_frequency()
2191 return intel_gpu_freq(rps, rps->max_freq_softlimit); in intel_rps_get_max_frequency()
2195 * intel_rps_get_max_raw_freq - returns the max frequency in some raw format.
2198 * Returns the max frequency in a raw format. In newer platforms raw is in
2204 u32 freq; in intel_rps_get_max_raw_freq() local
2207 return DIV_ROUND_CLOSEST(slpc->rp0_freq, in intel_rps_get_max_raw_freq()
2210 freq = rps->max_freq; in intel_rps_get_max_raw_freq()
2213 freq /= GEN9_FREQ_SCALER; in intel_rps_get_max_raw_freq()
2215 return freq; in intel_rps_get_max_raw_freq()
2224 return slpc->rp0_freq; in intel_rps_get_rp0_frequency()
2226 return intel_gpu_freq(rps, rps->rp0_freq); in intel_rps_get_rp0_frequency()
2234 return slpc->rp1_freq; in intel_rps_get_rp1_frequency()
2236 return intel_gpu_freq(rps, rps->rp1_freq); in intel_rps_get_rp1_frequency()
2244 return slpc->min_freq; in intel_rps_get_rpn_frequency()
2246 return intel_gpu_freq(rps, rps->min_freq); in intel_rps_get_rpn_frequency()
2252 struct drm_i915_private *i915 = gt->i915; in rps_frequency_dump()
2253 struct intel_uncore *uncore = gt->uncore; in rps_frequency_dump()
2343 rps->pm_intrmsk_mbz); in rps_frequency_dump()
2345 drm_printf(p, "Render p-state ratio: %d\n", in rps_frequency_dump()
2347 drm_printf(p, "Render p-state VID: %d\n", in rps_frequency_dump()
2349 drm_printf(p, "Render p-state limit: %d\n", in rps_frequency_dump()
2365 rps->power.up_threshold); in rps_frequency_dump()
2381 rps->power.down_threshold); in rps_frequency_dump()
2391 drm_printf(p, "Max non-overclocked (RP0) frequency: %dMHz\n", in rps_frequency_dump()
2393 drm_printf(p, "Max overclocked frequency: %dMHz\n", in rps_frequency_dump()
2394 intel_gpu_freq(rps, rps->max_freq)); in rps_frequency_dump()
2396 drm_printf(p, "Current freq: %d MHz\n", in rps_frequency_dump()
2397 intel_gpu_freq(rps, rps->cur_freq)); in rps_frequency_dump()
2398 drm_printf(p, "Actual freq: %d MHz\n", cagf); in rps_frequency_dump()
2399 drm_printf(p, "Idle freq: %d MHz\n", in rps_frequency_dump()
2400 intel_gpu_freq(rps, rps->idle_freq)); in rps_frequency_dump()
2401 drm_printf(p, "Min freq: %d MHz\n", in rps_frequency_dump()
2402 intel_gpu_freq(rps, rps->min_freq)); in rps_frequency_dump()
2403 drm_printf(p, "Boost freq: %d MHz\n", in rps_frequency_dump()
2404 intel_gpu_freq(rps, rps->boost_freq)); in rps_frequency_dump()
2405 drm_printf(p, "Max freq: %d MHz\n", in rps_frequency_dump()
2406 intel_gpu_freq(rps, rps->max_freq)); in rps_frequency_dump()
2409 intel_gpu_freq(rps, rps->efficient_freq)); in rps_frequency_dump()
2415 struct intel_uncore *uncore = gt->uncore; in slpc_frequency_dump()
2424 rps->pm_intrmsk_mbz); in slpc_frequency_dump()
2431 drm_printf(p, "Max non-overclocked (RP0) frequency: %dMHz\n", in slpc_frequency_dump()
2433 drm_printf(p, "Current freq: %d MHz\n", in slpc_frequency_dump()
2435 drm_printf(p, "Actual freq: %d MHz\n", in slpc_frequency_dump()
2437 drm_printf(p, "Min freq: %d MHz\n", in slpc_frequency_dump()
2439 drm_printf(p, "Boost freq: %d MHz\n", in slpc_frequency_dump()
2441 drm_printf(p, "Max freq: %d MHz\n", in slpc_frequency_dump()
2461 mutex_lock(&rps->lock); in set_max_freq()
2464 if (val < rps->min_freq || in set_max_freq()
2465 val > rps->max_freq || in set_max_freq()
2466 val < rps->min_freq_softlimit) { in set_max_freq()
2467 ret = -EINVAL; in set_max_freq()
2471 if (val > rps->rp0_freq) in set_max_freq()
2472 drm_dbg(&i915->drm, "User requested overclocking to %d\n", in set_max_freq()
2475 rps->max_freq_softlimit = val; in set_max_freq()
2477 val = clamp_t(int, rps->cur_freq, in set_max_freq()
2478 rps->min_freq_softlimit, in set_max_freq()
2479 rps->max_freq_softlimit); in set_max_freq()
2489 mutex_unlock(&rps->lock); in set_max_freq()
2509 return slpc->min_freq_softlimit; in intel_rps_get_min_frequency()
2511 return intel_gpu_freq(rps, rps->min_freq_softlimit); in intel_rps_get_min_frequency()
2515 * intel_rps_get_min_raw_freq - returns the min frequency in some raw format.
2524 u32 freq; in intel_rps_get_min_raw_freq() local
2527 return DIV_ROUND_CLOSEST(slpc->min_freq, in intel_rps_get_min_raw_freq()
2530 freq = rps->min_freq; in intel_rps_get_min_raw_freq()
2533 freq /= GEN9_FREQ_SCALER; in intel_rps_get_min_raw_freq()
2535 return freq; in intel_rps_get_min_raw_freq()
2543 mutex_lock(&rps->lock); in set_min_freq()
2546 if (val < rps->min_freq || in set_min_freq()
2547 val > rps->max_freq || in set_min_freq()
2548 val > rps->max_freq_softlimit) { in set_min_freq()
2549 ret = -EINVAL; in set_min_freq()
2553 rps->min_freq_softlimit = val; in set_min_freq()
2555 val = clamp_t(int, rps->cur_freq, in set_min_freq()
2556 rps->min_freq_softlimit, in set_min_freq()
2557 rps->max_freq_softlimit); in set_min_freq()
2567 mutex_unlock(&rps->lock); in set_min_freq()
2584 return rps->power.up_threshold; in intel_rps_get_up_threshold()
2592 return -EINVAL; in rps_set_threshold()
2594 ret = mutex_lock_interruptible(&rps->lock); in rps_set_threshold()
2604 rps->last_freq = -1; in rps_set_threshold()
2605 mutex_lock(&rps->power.mutex); in rps_set_threshold()
2606 rps->power.mode = -1; in rps_set_threshold()
2607 mutex_unlock(&rps->power.mutex); in rps_set_threshold()
2609 intel_rps_set(rps, clamp(rps->cur_freq, in rps_set_threshold()
2610 rps->min_freq_softlimit, in rps_set_threshold()
2611 rps->max_freq_softlimit)); in rps_set_threshold()
2614 mutex_unlock(&rps->lock); in rps_set_threshold()
2621 return rps_set_threshold(rps, &rps->power.up_threshold, threshold); in intel_rps_set_up_threshold()
2626 return rps->power.down_threshold; in intel_rps_get_down_threshold()
2631 return rps_set_threshold(rps, &rps->power.down_threshold, threshold); in intel_rps_set_down_threshold()
2647 mutex_lock(&rps->lock); in intel_rps_raise_unslice()
2662 intel_rps_set(rps, rps->rp0_freq); in intel_rps_raise_unslice()
2665 mutex_unlock(&rps->lock); in intel_rps_raise_unslice()
2672 mutex_lock(&rps->lock); in intel_rps_lower_unslice()
2687 intel_rps_set(rps, rps->min_freq); in intel_rps_lower_unslice()
2690 mutex_unlock(&rps->lock); in intel_rps_lower_unslice()
2699 with_intel_runtime_pm(gt->uncore->rpm, wakeref) in rps_read_mmio()
2700 val = intel_uncore_read(gt->uncore, reg32); in rps_read_mmio()
2740 * We only register the i915 ips part with intel-ips once everything is in intel_rps_driver_register()
2741 * set up, to avoid intel-ips sneaking in and reading bogus values. in intel_rps_driver_register()
2743 if (GRAPHICS_VER(gt->i915) == 5) { in intel_rps_driver_register()
2745 rcu_assign_pointer(ips_mchdev, gt->i915); in intel_rps_driver_register()
2762 if (i915 && !kref_get_unless_zero(&i915->drm.ref)) in mchdev_get()
2770 * i915_read_mch_val - return value for IPS use
2786 with_intel_runtime_pm(&i915->runtime_pm, wakeref) { in i915_read_mch_val()
2787 struct intel_ips *ips = &to_gt(i915)->rps.ips; in i915_read_mch_val()
2795 drm_dev_put(&i915->drm); in i915_read_mch_val()
2801 * i915_gpu_raise - raise GPU frequency limit
2814 rps = &to_gt(i915)->rps; in i915_gpu_raise()
2817 if (rps->max_freq_softlimit < rps->max_freq) in i915_gpu_raise()
2818 rps->max_freq_softlimit++; in i915_gpu_raise()
2821 drm_dev_put(&i915->drm); in i915_gpu_raise()
2827 * i915_gpu_lower - lower GPU frequency limit
2841 rps = &to_gt(i915)->rps; in i915_gpu_lower()
2844 if (rps->max_freq_softlimit > rps->min_freq) in i915_gpu_lower()
2845 rps->max_freq_softlimit--; in i915_gpu_lower()
2848 drm_dev_put(&i915->drm); in i915_gpu_lower()
2854 * i915_gpu_busy - indicate GPU business to IPS
2867 ret = to_gt(i915)->awake; in i915_gpu_busy()
2869 drm_dev_put(&i915->drm); in i915_gpu_busy()
2875 * i915_gpu_turbo_disable - disable graphics turbo
2877 * Disable graphics turbo by resetting the max frequency and setting the
2890 rps = &to_gt(i915)->rps; in i915_gpu_turbo_disable()
2893 rps->max_freq_softlimit = rps->min_freq; in i915_gpu_turbo_disable()
2894 ret = !__gen5_rps_set(&to_gt(i915)->rps, rps->min_freq); in i915_gpu_turbo_disable()
2897 drm_dev_put(&i915->drm); in i915_gpu_turbo_disable()