Lines Matching +full:always +full:- +full:running in kernel/sched/pelt.h
2 #include "sched-pelt.h"
7 int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
8 int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);
16 return READ_ONCE(rq->avg_hw.load_avg); in hw_load_avg()
32 int update_irq_load_avg(struct rq *rq, u64 running);
35 update_irq_load_avg(struct rq *rq, u64 running) in update_irq_load_avg() argument
41 #define PELT_MIN_DIVIDER (LOAD_AVG_MAX - 1024)
45 return PELT_MIN_DIVIDER + avg->period_contrib; in get_pelt_divider()
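The two fragments above are the divider used to turn a PELT running sum into an average: the maximum sum reachable over the already-completed 1024us periods (LOAD_AVG_MAX - 1024) plus the microseconds accrued in the current, partial period. A minimal user-space sketch of that arithmetic follows; the toy_avg struct, the example numbers and the main() harness are hypothetical, while LOAD_AVG_MAX = 47742 is the value emitted into the generated sched-pelt.h.

#include <stdio.h>
#include <stdint.h>

#define LOAD_AVG_MAX      47742                   /* from the generated sched-pelt.h */
#define PELT_MIN_DIVIDER  (LOAD_AVG_MAX - 1024)

/* Toy stand-in for struct sched_avg: only the fields the divider needs. */
struct toy_avg {
        uint64_t util_sum;        /* running sum, scaled by SCHED_CAPACITY_SCALE */
        uint32_t period_contrib;  /* microseconds accrued in the current 1024us period */
};

static uint32_t toy_get_pelt_divider(const struct toy_avg *avg)
{
        return PELT_MIN_DIVIDER + avg->period_contrib;
}

int main(void)
{
        /* Hypothetical task that has been running roughly half of the time. */
        struct toy_avg avg = { .util_sum = 23000ULL * 1024, .period_contrib = 512 };

        printf("util_avg = %llu\n",
               (unsigned long long)(avg.util_sum / toy_get_pelt_divider(&avg)));
        return 0;
}

With these numbers the sketch prints a util_avg of about 498 out of 1024, which is the ratio the division in the kernel would report for such a task.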
56 enqueued = avg->util_est; in cfs_se_util_change()
62 WRITE_ONCE(avg->util_est, enqueued); in cfs_se_util_change()
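cfs_se_util_change(), matched above, clears a flag that is stored inside util_est itself and marks whether util_avg has changed since util_est was last updated; the early return avoids a redundant store when the flag is already clear. The sketch below mirrors that flow in user space. The toy names and the main() harness are made up, the 0x80000000 value follows recent kernels' UTIL_AVG_UNCHANGED but should be treated as illustrative, and the kernel performs the final store with WRITE_ONCE(), which the sketch only notes in a comment.

#include <stdio.h>

/* Flag bit kept alongside util_est; the value is illustrative. */
#define UTIL_AVG_UNCHANGED 0x80000000U

/* Toy stand-in for the relevant part of struct sched_avg. */
struct toy_util {
        unsigned int util_est;
};

/* Mirrors the flow of cfs_se_util_change(): clear the "unchanged" flag,
 * but skip the store entirely if it is already clear. */
static void toy_util_change(struct toy_util *avg)
{
        unsigned int enqueued = avg->util_est;

        if (!(enqueued & UTIL_AVG_UNCHANGED))
                return;                          /* flag already reset, avoid the store */

        enqueued &= ~UTIL_AVG_UNCHANGED;         /* report that util_avg has been updated */
        avg->util_est = enqueued;                /* the kernel uses WRITE_ONCE() here */
}

int main(void)
{
        struct toy_util avg = { .util_est = 300 | UTIL_AVG_UNCHANGED };

        toy_util_change(&avg);
        printf("util_est after update: %u\n", avg.util_est);  /* prints 300 */
        return 0;
}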
70 return rq->clock_pelt - rq->lost_idle_time; in rq_clock_pelt()
76 rq->clock_pelt = rq_clock_task(rq); in _update_idle_rq_clock_pelt()
78 u64_u32_store(rq->clock_idle, rq_clock(rq)); in _update_idle_rq_clock_pelt()
81 u64_u32_store(rq->clock_pelt_idle, rq_clock_pelt(rq)); in _update_idle_rq_clock_pelt()
86 * computation done during the running delta time but then sync back to
91 * @ max capacity ------******---------------******---------------
92 * @ half capacity ------************---------************---------
98 if (unlikely(is_idle_task(rq->curr))) { in update_rq_clock_pelt()
108 * Running longer results in stealing idle time that will in update_rq_clock_pelt()
122 rq->clock_pelt += delta; in update_rq_clock_pelt()
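The matches above come from update_rq_clock_pelt() and its comment: while tasks run, the elapsed delta is shrunk by CPU capacity and by current frequency before being added to clock_pelt, so a CPU at half capacity advances its PELT clock at half the rate of clock_task (the two asterisk rows in the diagram), and the deficit is caught up when the rq goes idle by syncing clock_pelt back to clock_task. Below is a minimal user-space sketch of the scaling step only. cap_scale() is modeled on the kernel helper of that name; toy_rq, toy_update_clock_pelt and the 8ms example are hypothetical, and cpu_cap/freq_cap stand in for arch_scale_cpu_capacity() and arch_scale_freq_capacity().

#include <stdio.h>
#include <stdint.h>

#define SCHED_CAPACITY_SHIFT 10
#define SCHED_CAPACITY_SCALE (1U << SCHED_CAPACITY_SHIFT)  /* 1024 */

/* Scale a time delta by a capacity value, as the kernel's cap_scale() does. */
static uint64_t cap_scale(uint64_t delta, unsigned long cap)
{
        return (delta * cap) >> SCHED_CAPACITY_SHIFT;
}

/* Toy rq with just the clocks this sketch needs. */
struct toy_rq {
        uint64_t clock_task;  /* task clock, advancing with wall time */
        uint64_t clock_pelt;  /* scaled clock used by PELT while running */
};

/* Mirrors the scaling step of update_rq_clock_pelt(): the running delta is
 * shrunk by CPU capacity and by current frequency before feeding clock_pelt. */
static void toy_update_clock_pelt(struct toy_rq *rq, uint64_t delta,
                                  unsigned long cpu_cap, unsigned long freq_cap)
{
        rq->clock_task += delta;   /* toy bookkeeping; the kernel maintains clock_task elsewhere */

        delta = cap_scale(delta, cpu_cap);
        delta = cap_scale(delta, freq_cap);
        rq->clock_pelt += delta;
}

int main(void)
{
        struct toy_rq rq = { 0, 0 };

        /* 8ms of running time on a CPU at half compute capacity, full frequency. */
        toy_update_clock_pelt(&rq, 8000000ULL, SCHED_CAPACITY_SCALE / 2, SCHED_CAPACITY_SCALE);

        printf("clock_task = %llu ns, clock_pelt = %llu ns\n",
               (unsigned long long)rq.clock_task,
               (unsigned long long)rq.clock_pelt);  /* 8000000 vs 4000000 */
        return 0;
}

Scaling the clock rather than each individual contribution keeps the per-entity PELT code identical on CPUs of different capacities; the accumulated deficit is simply absorbed the next time the rq goes idle, as the update_idle_rq_clock_pelt() fragments below show.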
129 * (LOAD_AVG_MAX - 1024 + rq->cfs.avg.period_contrib) << SCHED_CAPACITY_SHIFT;
136 u32 divider = ((LOAD_AVG_MAX - 1024) << SCHED_CAPACITY_SHIFT) - LOAD_AVG_MAX; in update_idle_rq_clock_pelt()
137 u32 util_sum = rq->cfs.avg.util_sum; in update_idle_rq_clock_pelt()
138 util_sum += rq->avg_rt.util_sum; in update_idle_rq_clock_pelt()
139 util_sum += rq->avg_dl.util_sum; in update_idle_rq_clock_pelt()
145 * considered as an always running rq without idle time to in update_idle_rq_clock_pelt()
151 rq->lost_idle_time += rq_clock_task(rq) - rq->clock_pelt; in update_idle_rq_clock_pelt()
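These fragments are from update_idle_rq_clock_pelt(), which runs when the rq becomes idle: if the summed cfs, rt and dl util_sum indicates the rq was effectively always running, the gap between clock_task and clock_pelt is not idle time that would exist at max capacity, so it is accumulated into lost_idle_time, which rq_clock_pelt() (line 70 above) later subtracts. A user-space sketch of that decision follows; toy_rq, toy_idle_update and the numbers in main() are hypothetical, and the final clock_pelt sync is a simplification of _update_idle_rq_clock_pelt().

#include <stdio.h>
#include <stdint.h>

#define LOAD_AVG_MAX          47742   /* from the generated sched-pelt.h */
#define SCHED_CAPACITY_SHIFT  10

/* Toy view of a runqueue at the moment it goes idle. */
struct toy_rq {
        uint64_t clock_task;
        uint64_t clock_pelt;        /* lags clock_task while running below max capacity */
        uint64_t lost_idle_time;
        uint32_t cfs_util_sum, rt_util_sum, dl_util_sum;
};

/* Mirrors the decision in update_idle_rq_clock_pelt(): if utilization says the
 * rq was effectively always running, the clock deficit is recorded as lost
 * idle time instead of being reported as spare capacity. */
static void toy_idle_update(struct toy_rq *rq)
{
        uint32_t divider = ((LOAD_AVG_MAX - 1024) << SCHED_CAPACITY_SHIFT) - LOAD_AVG_MAX;
        uint32_t util_sum = rq->cfs_util_sum + rq->rt_util_sum + rq->dl_util_sum;

        if (util_sum >= divider)
                rq->lost_idle_time += rq->clock_task - rq->clock_pelt;

        /* the kernel then syncs clock_pelt back to clock_task (idle sync) */
        rq->clock_pelt = rq->clock_task;
}

int main(void)
{
        /* Hypothetical nearly saturated rq: 100ms of task time, 60ms of pelt time. */
        struct toy_rq rq = {
                .clock_task = 100000000, .clock_pelt = 60000000,
                .cfs_util_sum = (LOAD_AVG_MAX - 500) << SCHED_CAPACITY_SHIFT,
        };

        toy_idle_update(&rq);
        printf("lost_idle_time = %llu ns\n", (unsigned long long)rq.lost_idle_time);
        return 0;
}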
161 if (unlikely(cfs_rq->throttle_count)) in update_idle_cfs_rq_clock_pelt()
164 throttled = cfs_rq->throttled_clock_pelt_time; in update_idle_cfs_rq_clock_pelt()
166 u64_u32_store(cfs_rq->throttled_pelt_idle, throttled); in update_idle_cfs_rq_clock_pelt()
169 /* rq->clock_task normalized against any time this cfs_rq has spent throttled */
172 if (unlikely(cfs_rq->throttle_count)) in cfs_rq_clock_pelt()
173 return cfs_rq->throttled_clock_pelt - cfs_rq->throttled_clock_pelt_time; in cfs_rq_clock_pelt()
175 return rq_clock_pelt(rq_of(cfs_rq)) - cfs_rq->throttled_clock_pelt_time; in cfs_rq_clock_pelt()
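cfs_rq_clock_pelt(), matched above, keeps a throttled cfs_rq's PELT clock from advancing: it subtracts the total time the cfs_rq has spent throttled from the rq's PELT clock, and while the cfs_rq is currently throttled it returns the value frozen when throttling started. A small user-space sketch of that logic follows; toy_cfs_rq, toy_cfs_rq_clock_pelt and the numbers in main() are hypothetical, and the rq PELT clock is passed in explicitly instead of calling rq_clock_pelt(rq_of(cfs_rq)).

#include <stdio.h>
#include <stdint.h>

/* Toy view of the throttling-related PELT state of a cfs_rq. */
struct toy_cfs_rq {
        int      throttle_count;             /* non-zero while throttled */
        uint64_t throttled_clock_pelt;       /* rq pelt clock captured at throttle time */
        uint64_t throttled_clock_pelt_time;  /* total pelt time spent throttled so far */
};

/* Mirrors cfs_rq_clock_pelt(): the cfs_rq's PELT clock is the rq's PELT clock
 * with all throttled time removed, frozen while currently throttled. */
static uint64_t toy_cfs_rq_clock_pelt(const struct toy_cfs_rq *cfs_rq,
                                      uint64_t rq_clock_pelt)
{
        if (cfs_rq->throttle_count)
                return cfs_rq->throttled_clock_pelt - cfs_rq->throttled_clock_pelt_time;

        return rq_clock_pelt - cfs_rq->throttled_clock_pelt_time;
}

int main(void)
{
        /* Hypothetical numbers: 3ms spent throttled out of 10ms of rq pelt time. */
        struct toy_cfs_rq cfs_rq = {
                .throttle_count = 0,
                .throttled_clock_pelt = 7000000,
                .throttled_clock_pelt_time = 3000000,
        };

        printf("cfs_rq pelt clock = %llu ns\n",
               (unsigned long long)toy_cfs_rq_clock_pelt(&cfs_rq, 10000000));
        return 0;
}

Freezing the clock while throttled keeps the cfs_rq's load and utilization from decaying as if that time had been idle, which is the same effect the throttled_pelt_idle snapshot in update_idle_cfs_rq_clock_pelt() above preserves across idle periods.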
194 update_rt_rq_load_avg(u64 now, struct rq *rq, int running) in update_rt_rq_load_avg() argument
200 update_dl_rq_load_avg(u64 now, struct rq *rq, int running) in update_dl_rq_load_avg() argument
217 update_irq_load_avg(struct rq *rq, u64 running) in update_irq_load_avg() argument