Lines matching refs: sg_cpu (schedutil cpufreq governor, kernel/sched/cpufreq_schedutil.c)
198 static void sugov_get_util(struct sugov_cpu *sg_cpu, unsigned long boost) in sugov_get_util() argument
200 unsigned long min, max, util = scx_cpuperf_target(sg_cpu->cpu); in sugov_get_util()
203 util += cpu_util_cfs_boost(sg_cpu->cpu); in sugov_get_util()
204 util = effective_cpu_util(sg_cpu->cpu, util, &min, &max); in sugov_get_util()
206 sg_cpu->bw_min = min; in sugov_get_util()
207 sg_cpu->util = sugov_effective_cpu_perf(sg_cpu->cpu, util, min, max); in sugov_get_util()
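The sugov_get_util() lines above build the per-CPU utilization signal step by step: start from the sched_ext performance target, add the boosted CFS utilization, clamp it through effective_cpu_util(), then map the result to the performance value schedutil will act on. The snippet below is a minimal user-space sketch of that flow; every toy_* helper and every constant in it is an assumption standing in for the kernel helpers named above, not the kernel code itself.

/*
 * Toy sketch of the sugov_get_util() flow shown above. The helpers below are
 * simplified stand-ins (assumptions), not the real scx_cpuperf_target(),
 * cpu_util_cfs_boost(), effective_cpu_util() or sugov_effective_cpu_perf().
 */
#include <stdio.h>

#define SCHED_CAPACITY_SHIFT 10
#define SCHED_CAPACITY_SCALE (1UL << SCHED_CAPACITY_SHIFT)

static unsigned long toy_scx_target(void)     { return 0; }   /* sched_ext perf request */
static unsigned long toy_cfs_util_boost(void) { return 300; } /* CFS util incl. boost   */

static unsigned long toy_effective_util(unsigned long util,
                                        unsigned long *min, unsigned long *max)
{
    *min = 64;                    /* pretend DL/irq bandwidth floor  */
    *max = SCHED_CAPACITY_SCALE;  /* pretend uclamp/capacity ceiling */
    return util;
}

static unsigned long toy_get_util(unsigned long iowait_boost)
{
    unsigned long min, max;
    unsigned long util = toy_scx_target();

    util += toy_cfs_util_boost();
    util = toy_effective_util(util, &min, &max);

    /* the boost acts as a floor, then the result is kept inside [min, max] */
    if (util < iowait_boost)
        util = iowait_boost;
    if (util < min)
        util = min;
    if (util > max)
        util = max;
    return util;
}

int main(void)
{
    printf("toy util = %lu of %lu\n", toy_get_util(128), SCHED_CAPACITY_SCALE);
    return 0;
}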
221 static bool sugov_iowait_reset(struct sugov_cpu *sg_cpu, u64 time, in sugov_iowait_reset() argument
224 s64 delta_ns = time - sg_cpu->last_update; in sugov_iowait_reset()
230 sg_cpu->iowait_boost = set_iowait_boost ? IOWAIT_BOOST_MIN : 0; in sugov_iowait_reset()
231 sg_cpu->iowait_boost_pending = set_iowait_boost; in sugov_iowait_reset()
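sugov_iowait_reset() discards a stale boost: when the boost has not been refreshed for longer than roughly a tick, it is either dropped to zero or restarted at IOWAIT_BOOST_MIN if a new I/O-wait wakeup is being recorded. A standalone sketch of that decision follows; the tick length and the IOWAIT_BOOST_MIN value used below are assumptions, since neither definition appears in the lines above.

/* Toy model of the sugov_iowait_reset() decision shown above. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TOY_TICK_NSEC        (4 * 1000 * 1000LL)  /* assumed ~4 ms tick        */
#define TOY_IOWAIT_BOOST_MIN 128u                 /* assumed SCALE / 8 minimum */

struct toy_cpu {
    uint64_t last_update;
    unsigned int iowait_boost;
    bool iowait_boost_pending;
};

static bool toy_iowait_reset(struct toy_cpu *c, uint64_t time, bool set_iowait_boost)
{
    int64_t delta_ns = time - c->last_update;

    if (delta_ns <= TOY_TICK_NSEC)
        return false;                              /* boost is still fresh */

    c->iowait_boost = set_iowait_boost ? TOY_IOWAIT_BOOST_MIN : 0;
    c->iowait_boost_pending = set_iowait_boost;
    return true;
}

int main(void)
{
    struct toy_cpu c = { .last_update = 0, .iowait_boost = 512 };

    printf("reset after 10ms idle: %d, boost now %u\n",
           toy_iowait_reset(&c, 10 * 1000 * 1000ULL, false), c.iowait_boost);
    return 0;
}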
250 static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, u64 time, in sugov_iowait_boost() argument
256 if (sg_cpu->iowait_boost && in sugov_iowait_boost()
257 sugov_iowait_reset(sg_cpu, time, set_iowait_boost)) in sugov_iowait_boost()
265 if (sg_cpu->iowait_boost_pending) in sugov_iowait_boost()
267 sg_cpu->iowait_boost_pending = true; in sugov_iowait_boost()
270 if (sg_cpu->iowait_boost) { in sugov_iowait_boost()
271 sg_cpu->iowait_boost = in sugov_iowait_boost()
272 min_t(unsigned int, sg_cpu->iowait_boost << 1, SCHED_CAPACITY_SCALE); in sugov_iowait_boost()
277 sg_cpu->iowait_boost = IOWAIT_BOOST_MIN; in sugov_iowait_boost()
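sugov_iowait_boost() is the ramp-up side: a boost that is already pending is left alone, an existing boost is doubled up to SCHED_CAPACITY_SCALE, and a first I/O-wait wakeup starts the ladder at IOWAIT_BOOST_MIN. A toy version of just that doubling ladder, with the surrounding flag checks from the real function omitted:

/* Toy version of the iowait boost ramp-up in sugov_iowait_boost(). */
#include <stdio.h>

#define SCHED_CAPACITY_SCALE 1024u
#define IOWAIT_BOOST_MIN     (SCHED_CAPACITY_SCALE / 8)   /* assumed definition */

static unsigned int toy_iowait_ramp(unsigned int boost)
{
    if (!boost)
        return IOWAIT_BOOST_MIN;          /* first in-a-row iowait wakeup  */

    boost <<= 1;                          /* double on each further wakeup */
    return boost > SCHED_CAPACITY_SCALE ? SCHED_CAPACITY_SCALE : boost;
}

int main(void)
{
    unsigned int boost = 0;

    /* 128 -> 256 -> 512 -> 1024 -> 1024 ... */
    for (int i = 0; i < 5; i++) {
        boost = toy_iowait_ramp(boost);
        printf("wakeup %d: boost = %u\n", i + 1, boost);
    }
    return 0;
}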
298 static unsigned long sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time, in sugov_iowait_apply() argument
302 if (!sg_cpu->iowait_boost) in sugov_iowait_apply()
306 if (sugov_iowait_reset(sg_cpu, time, false)) in sugov_iowait_apply()
309 if (!sg_cpu->iowait_boost_pending) { in sugov_iowait_apply()
313 sg_cpu->iowait_boost >>= 1; in sugov_iowait_apply()
314 if (sg_cpu->iowait_boost < IOWAIT_BOOST_MIN) { in sugov_iowait_apply()
315 sg_cpu->iowait_boost = 0; in sugov_iowait_apply()
320 sg_cpu->iowait_boost_pending = false; in sugov_iowait_apply()
326 return (sg_cpu->iowait_boost * max_cap) >> SCHED_CAPACITY_SHIFT; in sugov_iowait_apply()
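sugov_iowait_apply() is the consumer side: if no fresh wakeup is pending the boost is halved, dropped entirely once it falls below IOWAIT_BOOST_MIN, and whatever remains is scaled by the CPU's capacity before being fed into sugov_get_util() as a utilization floor. A sketch of that decay and scaling (the IOWAIT_BOOST_MIN definition below is assumed):

/* Toy version of the boost decay and scaling in sugov_iowait_apply(). */
#include <stdbool.h>
#include <stdio.h>

#define SCHED_CAPACITY_SHIFT 10
#define SCHED_CAPACITY_SCALE (1u << SCHED_CAPACITY_SHIFT)
#define IOWAIT_BOOST_MIN     (SCHED_CAPACITY_SCALE / 8)   /* assumed definition */

struct toy_cpu {
    unsigned int iowait_boost;
    bool iowait_boost_pending;
};

static unsigned long toy_iowait_apply(struct toy_cpu *c, unsigned long max_cap)
{
    if (!c->iowait_boost)
        return 0;

    if (!c->iowait_boost_pending) {
        c->iowait_boost >>= 1;            /* decay once per update */
        if (c->iowait_boost < IOWAIT_BOOST_MIN) {
            c->iowait_boost = 0;          /* boost has run out     */
            return 0;
        }
    }
    c->iowait_boost_pending = false;

    /* scale the boost (0..1024) to this CPU's capacity */
    return ((unsigned long)c->iowait_boost * max_cap) >> SCHED_CAPACITY_SHIFT;
}

int main(void)
{
    struct toy_cpu c = { .iowait_boost = SCHED_CAPACITY_SCALE };

    /* on a 512-capacity core the floor decays: 256, 128, 64, 0, 0 */
    for (int i = 0; i < 5; i++)
        printf("update %d: boost floor = %lu\n", i, toy_iowait_apply(&c, 512));
    return 0;
}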
330 static bool sugov_hold_freq(struct sugov_cpu *sg_cpu) in sugov_hold_freq() argument
344 if (uclamp_rq_is_capped(cpu_rq(sg_cpu->cpu))) in sugov_hold_freq()
351 idle_calls = tick_nohz_get_idle_calls_cpu(sg_cpu->cpu); in sugov_hold_freq()
352 ret = idle_calls == sg_cpu->saved_idle_calls; in sugov_hold_freq()
354 sg_cpu->saved_idle_calls = idle_calls; in sugov_hold_freq()
358 static inline bool sugov_hold_freq(struct sugov_cpu *sg_cpu) { return false; } in sugov_hold_freq() argument
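sugov_hold_freq() decides whether the CPU has been busy since the last update by comparing the NOHZ idle-call counter with the value saved on the previous visit: if the counter did not move, the CPU never went idle, so lowering its frequency is held off (unless the runqueue is uclamp-capped, and the stub variant at line 358 simply never holds). A toy sketch of that comparison; the counter is passed in as a plain argument rather than read via tick_nohz_get_idle_calls_cpu():

/* Toy model of the idle-call comparison in sugov_hold_freq(). */
#include <stdbool.h>
#include <stdio.h>

struct toy_cpu {
    unsigned long saved_idle_calls;
};

/*
 * Returns true when the idle-entry counter has not advanced since the last
 * call, i.e. the CPU stayed busy the whole time and we should be reluctant
 * to lower its frequency.
 */
static bool toy_hold_freq(struct toy_cpu *c, unsigned long idle_calls)
{
    bool busy = (idle_calls == c->saved_idle_calls);

    c->saved_idle_calls = idle_calls;
    return busy;
}

int main(void)
{
    struct toy_cpu c = { .saved_idle_calls = 41 };

    printf("%d\n", toy_hold_freq(&c, 42));   /* counter moved: 0 (CPU idled) */
    printf("%d\n", toy_hold_freq(&c, 42));   /* counter stuck: 1 (CPU busy)  */
    return 0;
}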
365 static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu) in ignore_dl_rate_limit() argument
367 if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_min) in ignore_dl_rate_limit()
368 sg_cpu->sg_policy->limits_changed = true; in ignore_dl_rate_limit()
371 static inline bool sugov_update_single_common(struct sugov_cpu *sg_cpu, in sugov_update_single_common() argument
377 sugov_iowait_boost(sg_cpu, time, flags); in sugov_update_single_common()
378 sg_cpu->last_update = time; in sugov_update_single_common()
380 ignore_dl_rate_limit(sg_cpu); in sugov_update_single_common()
382 if (!sugov_should_update_freq(sg_cpu->sg_policy, time)) in sugov_update_single_common()
385 boost = sugov_iowait_apply(sg_cpu, time, max_cap); in sugov_update_single_common()
386 sugov_get_util(sg_cpu, boost); in sugov_update_single_common()
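sugov_update_single_common() is the shared front half of both single-CPU update paths: record the I/O-wait wakeup, stamp last_update, let growing deadline bandwidth void the rate limit via ignore_dl_rate_limit(), bail out if the rate limit still applies, then compute the boost and the utilization. The skeleton below only illustrates that ordering; every toy_* stub is an assumption, not the kernel helper it mimics.

/* Toy outline of the ordering in sugov_update_single_common(). */
#include <stdbool.h>
#include <stdint.h>

struct toy_cpu {
    uint64_t last_update;
    unsigned long util;
};

/* Stand-ins for the helpers referenced in the listing above. */
static void toy_iowait_boost(struct toy_cpu *c, uint64_t t, unsigned int f) { (void)c; (void)t; (void)f; }
static void toy_ignore_dl_rate_limit(struct toy_cpu *c)                     { (void)c; }
static bool toy_should_update_freq(uint64_t t)                              { (void)t; return true; }
static unsigned long toy_iowait_apply(struct toy_cpu *c, uint64_t t, unsigned long cap)
{
    (void)c; (void)t; (void)cap;
    return 0;
}
static void toy_get_util(struct toy_cpu *c, unsigned long boost) { c->util = 300 + boost; }

static bool toy_update_single_common(struct toy_cpu *c, uint64_t time,
                                     unsigned long max_cap, unsigned int flags)
{
    toy_iowait_boost(c, time, flags);      /* 1. account this wakeup's iowait flag   */
    c->last_update = time;

    toy_ignore_dl_rate_limit(c);           /* 2. DL pressure may void the rate limit */

    if (!toy_should_update_freq(time))     /* 3. otherwise respect the rate limit    */
        return false;

    toy_get_util(c, toy_iowait_apply(c, time, max_cap));  /* 4. boost, then util     */
    return true;
}

int main(void)
{
    struct toy_cpu c = { 0 };

    return toy_update_single_common(&c, 1000, 1024, 0) ? 0 : 1;
}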
394 struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util); in sugov_update_single_freq() local
395 struct sugov_policy *sg_policy = sg_cpu->sg_policy; in sugov_update_single_freq()
400 max_cap = arch_scale_cpu_capacity(sg_cpu->cpu); in sugov_update_single_freq()
402 if (!sugov_update_single_common(sg_cpu, time, max_cap, flags)) in sugov_update_single_freq()
405 next_f = get_next_freq(sg_policy, sg_cpu->util, max_cap); in sugov_update_single_freq()
407 if (sugov_hold_freq(sg_cpu) && next_f < sg_policy->next_freq && in sugov_update_single_freq()
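In the frequency-based single-CPU path the freshly computed next_f may only lower the frequency if the CPU actually went idle in between: when sugov_hold_freq() reports a busy CPU and next_f is below the current target, the previous frequency is kept. A toy decision function for that rule (the real code has an additional need_freq_update escape that does not appear in this sg_cpu listing):

/* Toy version of the ramp-down filter in sugov_update_single_freq(). */
#include <stdbool.h>
#include <stdio.h>

static unsigned int toy_pick_freq(bool cpu_was_busy, unsigned int next_f,
                                  unsigned int cur_target)
{
    /* a busy CPU is not allowed to slow down just because util dipped */
    if (cpu_was_busy && next_f < cur_target)
        return cur_target;
    return next_f;
}

int main(void)
{
    printf("%u\n", toy_pick_freq(true,  1200000, 1800000));  /* stays at 1800000 */
    printf("%u\n", toy_pick_freq(false, 1200000, 1800000));  /* drops to 1200000 */
    return 0;
}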
435 struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util); in sugov_update_single_perf() local
436 unsigned long prev_util = sg_cpu->util; in sugov_update_single_perf()
449 max_cap = arch_scale_cpu_capacity(sg_cpu->cpu); in sugov_update_single_perf()
451 if (!sugov_update_single_common(sg_cpu, time, max_cap, flags)) in sugov_update_single_perf()
454 if (sugov_hold_freq(sg_cpu) && sg_cpu->util < prev_util) in sugov_update_single_perf()
455 sg_cpu->util = prev_util; in sugov_update_single_perf()
457 cpufreq_driver_adjust_perf(sg_cpu->cpu, sg_cpu->bw_min, in sugov_update_single_perf()
458 sg_cpu->util, max_cap); in sugov_update_single_perf()
460 sg_cpu->sg_policy->last_freq_update_time = time; in sugov_update_single_perf()
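The perf-based single-CPU path applies the same hold rule to the raw utilization instead of a frequency and then passes the (bw_min, util, max_cap) triple to the driver. In the sketch below, toy_adjust_perf() is only a printing placeholder for the cpufreq_driver_adjust_perf() call shown above:

/* Toy sketch of the clamp-then-adjust step in sugov_update_single_perf(). */
#include <stdbool.h>
#include <stdio.h>

/* Placeholder for cpufreq_driver_adjust_perf(cpu, min, target, capacity). */
static void toy_adjust_perf(int cpu, unsigned long min, unsigned long target,
                            unsigned long cap)
{
    printf("cpu%d: perf request min=%lu target=%lu cap=%lu\n", cpu, min, target, cap);
}

static void toy_update_single_perf(int cpu, bool cpu_was_busy,
                                   unsigned long prev_util, unsigned long util,
                                   unsigned long bw_min, unsigned long max_cap)
{
    /* same rule as the freq path: a busy CPU keeps at least its previous level */
    if (cpu_was_busy && util < prev_util)
        util = prev_util;

    toy_adjust_perf(cpu, bw_min, util, max_cap);
}

int main(void)
{
    toy_update_single_perf(0, true,  700, 350, 64, 1024);   /* held at 700  */
    toy_update_single_perf(0, false, 700, 350, 64, 1024);   /* drops to 350 */
    return 0;
}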
463 static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time) in sugov_next_freq_shared() argument
465 struct sugov_policy *sg_policy = sg_cpu->sg_policy; in sugov_next_freq_shared()
470 max_cap = arch_scale_cpu_capacity(sg_cpu->cpu); in sugov_next_freq_shared()
488 struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util); in sugov_update_shared() local
489 struct sugov_policy *sg_policy = sg_cpu->sg_policy; in sugov_update_shared()
494 sugov_iowait_boost(sg_cpu, time, flags); in sugov_update_shared()
495 sg_cpu->last_update = time; in sugov_update_shared()
497 ignore_dl_rate_limit(sg_cpu); in sugov_update_shared()
500 next_f = sugov_next_freq_shared(sg_cpu, time); in sugov_update_shared()
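For a policy spanning several CPUs each CPU still records its own boost and timestamp, but the frequency is chosen for the policy as a whole: sugov_next_freq_shared(), of which only the first lines match sg_cpu here, sizes the frequency for the most demanding CPU in the policy. The per-CPU walk in the sketch below is therefore a stated assumption about the part of the function that does not appear in this listing, and the final mapping is only a crude stand-in for get_next_freq():

/* Toy sketch of the "size the policy for its busiest CPU" idea behind
 * sugov_next_freq_shared(); the per-CPU walk is assumed, not shown above. */
#include <stdio.h>

#define SCHED_CAPACITY_SCALE 1024u

static unsigned int toy_next_freq_shared(const unsigned long *cpu_util, int ncpus,
                                         unsigned long max_cap, unsigned int max_freq)
{
    unsigned long util = 0;

    for (int i = 0; i < ncpus; i++)
        if (cpu_util[i] > util)
            util = cpu_util[i];           /* the most demanding CPU wins */

    /* crude utilization-to-frequency mapping, standing in for get_next_freq() */
    return (unsigned int)((unsigned long long)max_freq * util / max_cap);
}

int main(void)
{
    unsigned long util[] = { 100, 700, 250, 50 };

    printf("next freq = %u kHz\n",
           toy_next_freq_shared(util, 4, SCHED_CAPACITY_SCALE, 2000000));
    return 0;
}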
838 struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu); in sugov_start() local
840 memset(sg_cpu, 0, sizeof(*sg_cpu)); in sugov_start()
841 sg_cpu->cpu = cpu; in sugov_start()
842 sg_cpu->sg_policy = sg_policy; in sugov_start()
843 cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util, uu); in sugov_start()
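Finally, sugov_start() wires it all up: each per-CPU sugov_cpu is zeroed, bound to its CPU and policy, and an update hook is registered so that one of the sugov_update_* callbacks above runs on every scheduler utilization update for that CPU. The sketch below mimics that registration pattern with an invented hook table; none of the toy_* names correspond to the kernel's cpufreq_add_update_util_hook() machinery.

/* Toy model of the per-CPU hook registration done in sugov_start(). */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TOY_NR_CPUS 4

struct toy_policy;

struct toy_cpu {
    int cpu;
    struct toy_policy *pol;
    void (*update_util)(struct toy_cpu *c, uint64_t time, unsigned int flags);
};

struct toy_policy { const char *name; };

/* stand-in for the per-CPU hook table the scheduler would consult */
static struct toy_cpu *toy_hooks[TOY_NR_CPUS];

static void toy_add_update_util_hook(int cpu, struct toy_cpu *c,
                                     void (*fn)(struct toy_cpu *, uint64_t, unsigned int))
{
    c->update_util = fn;
    toy_hooks[cpu] = c;
}

static void toy_update_single(struct toy_cpu *c, uint64_t time, unsigned int flags)
{
    printf("cpu%d update at %llu (flags %#x) for policy %s\n",
           c->cpu, (unsigned long long)time, flags, c->pol->name);
}

int main(void)
{
    static struct toy_cpu cpus[TOY_NR_CPUS];
    struct toy_policy pol = { .name = "toy-policy" };

    /* mirror of the sugov_start() loop: zero, bind, register */
    for (int cpu = 0; cpu < TOY_NR_CPUS; cpu++) {
        struct toy_cpu *c = &cpus[cpu];

        memset(c, 0, sizeof(*c));
        c->cpu = cpu;
        c->pol = &pol;
        toy_add_update_util_hook(cpu, c, toy_update_single);
    }

    /* the scheduler side would then invoke the hook on every util update */
    toy_hooks[1]->update_util(toy_hooks[1], 123456, 0);
    return 0;
}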